FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
videotoolboxenc.c
Go to the documentation of this file.
1 /*
2  * copyright (c) 2015 Rick Kern <kernrj@gmail.com>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <VideoToolbox/VideoToolbox.h>
22 #include <CoreVideo/CoreVideo.h>
23 #include <CoreMedia/CoreMedia.h>
24 #include <TargetConditionals.h>
25 #include <Availability.h>
26 #include "avcodec.h"
27 #include "libavutil/opt.h"
28 #include "libavutil/avassert.h"
29 #include "libavutil/atomic.h"
30 #include "libavutil/avstring.h"
31 #include "libavcodec/avcodec.h"
32 #include "libavutil/pixdesc.h"
33 #include "internal.h"
34 #include <pthread.h>
35 #include "h264.h"
36 #include "h264_sei.h"
37 
38 #if !CONFIG_VT_BT2020
39 # define kCVImageBufferColorPrimaries_ITU_R_2020 CFSTR("ITU_R_2020")
40 # define kCVImageBufferTransferFunction_ITU_R_2020 CFSTR("ITU_R_2020")
41 # define kCVImageBufferYCbCrMatrix_ITU_R_2020 CFSTR("ITU_R_2020")
42 #endif
43 
44 typedef enum VT_H264Profile {
51 
52 typedef enum VTH264Entropy{
57 
/* Annex B start code prepended to each parameter set / NAL unit on output. */
static const uint8_t start_code[] = { 0, 0, 0, 1 };
59 
/**
 * Out-of-band SEI payload carried through the encode queue alongside a frame.
 * data is av_malloc()ed; ownership passes to the BufNode and is released by
 * vtenc_q_pop() when the consumer does not take it.
 */
typedef struct ExtraSEI {
    void *data;   /* raw SEI payload bytes */
    size_t size;  /* number of valid bytes at data */
} ExtraSEI;
64 
/* Singly-linked queue node holding one encoded sample buffer. */
typedef struct BufNode {
    CMSampleBufferRef cm_buffer; /* retained on push, released when consumed */
    /* NOTE(review): an "ExtraSEI *sei;" member (referenced by vtenc_q_push()/
     * vtenc_q_pop()) appears to have been lost in extraction here -- confirm
     * against the original file. */
    struct BufNode* next;        /* next node toward the queue tail */
    int error;                   /* presumably a per-node error status -- not read in this view; verify */
} BufNode;
71 
/* Private codec context for the VideoToolbox H.264 encoder. */
typedef struct VTEncContext {
    AVClass *class;
    VTCompressionSessionRef session;  /* active VideoToolbox compression session */
    CFStringRef ycbcr_matrix;         /* color metadata applied to the session */
    CFStringRef color_primaries;
    CFStringRef transfer_function;

    /* NOTE(review): members referenced elsewhere in this file but missing from
     * this extracted view: pthread_mutex_t lock; pthread_cond_t cv_sample_sent;
     * BufNode *q_head, *q_tail; int async_error -- confirm in the original. */

    int64_t frame_ct_out;  /* frames popped from the output queue */
    int64_t frame_ct_in;   /* frames submitted for encoding */

    int64_t first_pts;
    int64_t dts_delta;     /* -1 when B-frames are enabled, else 0 (see init) */

    /* Presumably AVOption-backed settings (int64_t option storage) -- verify
     * against the option table, which is outside this view. */
    int64_t profile;
    int64_t level;
    int64_t entropy;
    int64_t realtime;
    int64_t frames_before;
    int64_t frames_after;

    int64_t allow_sw;      /* permit software encoding fallback (macOS only) */

    bool flushing;         /* set at EOF; pop returns NULL once queue drains */
    /* NOTE(review): a "bool has_b_frames;" member used throughout this file is
     * missing from this view -- confirm in the original. */
    bool a53_cc;           /* presumably A/53 closed-caption SEI insertion -- verify */
} VTEncContext;
107 
/* Forward declaration; used by vtenc_configure_encoder() when
 * AV_CODEC_FLAG_GLOBAL_HEADER is set (definition appears later in the file). */
static int vtenc_populate_extradata(AVCodecContext *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef profile_level,
                                    CFNumberRef gamma_level,
                                    CFDictionaryRef enc_info,
                                    CFDictionaryRef pixel_buffer_info);
114 
115 /**
116  * NULL-safe release of *refPtr, and sets value to NULL.
117  */
118 static void vt_release_num(CFNumberRef* refPtr){
119  if (!*refPtr) {
120  return;
121  }
122 
123  CFRelease(*refPtr);
124  *refPtr = NULL;
125 }
126 
127 static void set_async_error(VTEncContext *vtctx, int err)
128 {
129  BufNode *info;
130 
131  pthread_mutex_lock(&vtctx->lock);
132 
133  vtctx->async_error = err;
134 
135  info = vtctx->q_head;
136  vtctx->q_head = vtctx->q_tail = NULL;
137 
138  while (info) {
139  BufNode *next = info->next;
140  CFRelease(info->cm_buffer);
141  av_free(info);
142  info = next;
143  }
144 
145  pthread_mutex_unlock(&vtctx->lock);
146 }
147 
/* Empties the pending-output queue without recording a failure:
 * set_async_error() with err == 0 releases every queued node. */
static void clear_frame_queue(VTEncContext *vtctx)
{
    set_async_error(vtctx, 0);
}
152 
153 static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
154 {
155  BufNode *info;
156 
157  pthread_mutex_lock(&vtctx->lock);
158 
159  if (vtctx->async_error) {
160  pthread_mutex_unlock(&vtctx->lock);
161  return vtctx->async_error;
162  }
163 
164  if (vtctx->flushing && vtctx->frame_ct_in == vtctx->frame_ct_out) {
165  *buf = NULL;
166 
167  pthread_mutex_unlock(&vtctx->lock);
168  return 0;
169  }
170 
171  while (!vtctx->q_head && !vtctx->async_error && wait) {
172  pthread_cond_wait(&vtctx->cv_sample_sent, &vtctx->lock);
173  }
174 
175  if (!vtctx->q_head) {
176  pthread_mutex_unlock(&vtctx->lock);
177  *buf = NULL;
178  return 0;
179  }
180 
181  info = vtctx->q_head;
182  vtctx->q_head = vtctx->q_head->next;
183  if (!vtctx->q_head) {
184  vtctx->q_tail = NULL;
185  }
186 
187  pthread_mutex_unlock(&vtctx->lock);
188 
189  *buf = info->cm_buffer;
190  if (sei && *buf) {
191  *sei = info->sei;
192  } else if (info->sei) {
193  if (info->sei->data) av_free(info->sei->data);
194  av_free(info->sei);
195  }
196  av_free(info);
197 
198  vtctx->frame_ct_out++;
199 
200  return 0;
201 }
202 
203 static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
204 {
205  BufNode *info = av_malloc(sizeof(BufNode));
206  if (!info) {
207  set_async_error(vtctx, AVERROR(ENOMEM));
208  return;
209  }
210 
211  CFRetain(buffer);
212  info->cm_buffer = buffer;
213  info->sei = sei;
214  info->next = NULL;
215 
216  pthread_mutex_lock(&vtctx->lock);
218 
219  if (!vtctx->q_head) {
220  vtctx->q_head = info;
221  } else {
222  vtctx->q_tail->next = info;
223  }
224 
225  vtctx->q_tail = info;
226 
227  pthread_mutex_unlock(&vtctx->lock);
228 }
229 
230 static int count_nalus(size_t length_code_size,
231  CMSampleBufferRef sample_buffer,
232  int *count)
233 {
234  size_t offset = 0;
235  int status;
236  int nalu_ct = 0;
237  uint8_t size_buf[4];
238  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
239  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
240 
241  if (length_code_size > 4)
242  return AVERROR_INVALIDDATA;
243 
244  while (offset < src_size) {
245  size_t curr_src_len;
246  size_t box_len = 0;
247  size_t i;
248 
249  status = CMBlockBufferCopyDataBytes(block,
250  offset,
251  length_code_size,
252  size_buf);
253 
254  for (i = 0; i < length_code_size; i++) {
255  box_len <<= 8;
256  box_len |= size_buf[i];
257  }
258 
259  curr_src_len = box_len + length_code_size;
260  offset += curr_src_len;
261 
262  nalu_ct++;
263  }
264 
265  *count = nalu_ct;
266  return 0;
267 }
268 
269 static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
270 {
271  switch (id) {
272  case AV_CODEC_ID_H264: return kCMVideoCodecType_H264;
273  default: return 0;
274  }
275 }
276 
/**
 * Computes the total number of bytes required to store every H.264
 * parameter set (SPS/PPS) contained in vid_fmt, each prefixed with a
 * 4-byte Annex B start code. On success, *size receives that total.
 */
285 static int get_params_size(
286  AVCodecContext *avctx,
287  CMVideoFormatDescriptionRef vid_fmt,
288  size_t *size)
289 {
290  size_t total_size = 0;
291  size_t ps_count;
292  int is_count_bad = 0;
293  size_t i;
294  int status;
295  status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
296  0,
297  NULL,
298  NULL,
299  &ps_count,
300  NULL);
301  if (status) {
302  is_count_bad = 1;
303  ps_count = 0;
304  status = 0;
305  }
306 
307  for (i = 0; i < ps_count || is_count_bad; i++) {
308  const uint8_t *ps;
309  size_t ps_size;
310  status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
311  i,
312  &ps,
313  &ps_size,
314  NULL,
315  NULL);
316  if (status) {
317  /*
318  * When ps_count is invalid, status != 0 ends the loop normally
319  * unless we didn't get any parameter sets.
320  */
321  if (i > 0 && is_count_bad) status = 0;
322 
323  break;
324  }
325 
326  total_size += ps_size + sizeof(start_code);
327  }
328 
329  if (status) {
330  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set sizes: %d\n", status);
331  return AVERROR_EXTERNAL;
332  }
333 
334  *size = total_size;
335  return 0;
336 }
337 
338 static int copy_param_sets(
339  AVCodecContext *avctx,
340  CMVideoFormatDescriptionRef vid_fmt,
341  uint8_t *dst,
342  size_t dst_size)
343 {
344  size_t ps_count;
345  int is_count_bad = 0;
346  int status;
347  size_t offset = 0;
348  size_t i;
349 
350  status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
351  0,
352  NULL,
353  NULL,
354  &ps_count,
355  NULL);
356  if (status) {
357  is_count_bad = 1;
358  ps_count = 0;
359  status = 0;
360  }
361 
362 
363  for (i = 0; i < ps_count || is_count_bad; i++) {
364  const uint8_t *ps;
365  size_t ps_size;
366  size_t next_offset;
367 
368  status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
369  i,
370  &ps,
371  &ps_size,
372  NULL,
373  NULL);
374  if (status) {
375  if (i > 0 && is_count_bad) status = 0;
376 
377  break;
378  }
379 
380  next_offset = offset + sizeof(start_code) + ps_size;
381  if (dst_size < next_offset) {
382  av_log(avctx, AV_LOG_ERROR, "Error: buffer too small for parameter sets.\n");
384  }
385 
386  memcpy(dst + offset, start_code, sizeof(start_code));
387  offset += sizeof(start_code);
388 
389  memcpy(dst + offset, ps, ps_size);
390  offset = next_offset;
391  }
392 
393  if (status) {
394  av_log(avctx, AV_LOG_ERROR, "Error getting parameter set data: %d\n", status);
395  return AVERROR_EXTERNAL;
396  }
397 
398  return 0;
399 }
400 
401 static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
402 {
403  CMVideoFormatDescriptionRef vid_fmt;
404  size_t total_size;
405  int status;
406 
407  vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
408  if (!vid_fmt) {
409  av_log(avctx, AV_LOG_ERROR, "No video format.\n");
410  return AVERROR_EXTERNAL;
411  }
412 
413  status = get_params_size(avctx, vid_fmt, &total_size);
414  if (status) {
415  av_log(avctx, AV_LOG_ERROR, "Could not get parameter sets.\n");
416  return status;
417  }
418 
419  avctx->extradata = av_mallocz(total_size + AV_INPUT_BUFFER_PADDING_SIZE);
420  if (!avctx->extradata) {
421  return AVERROR(ENOMEM);
422  }
423  avctx->extradata_size = total_size;
424 
425  status = copy_param_sets(avctx, vid_fmt, avctx->extradata, total_size);
426 
427  if (status) {
428  av_log(avctx, AV_LOG_ERROR, "Could not copy param sets.\n");
429  return status;
430  }
431 
432  return 0;
433 }
434 
/* NOTE(review): the opening line of this function (expected
 * "static void vtenc_output_callback(") was lost in extraction -- confirm
 * against the original file.
 *
 * VTCompressionSession output callback. Runs on VideoToolbox's thread:
 * captures global-header extradata from the first sample when requested,
 * then queues the encoded sample (with any SEI side data) for the
 * consumer thread. */
        void *ctx,
        void *sourceFrameCtx,
        OSStatus status,
        VTEncodeInfoFlags flags,
        CMSampleBufferRef sample_buffer)
{
    AVCodecContext *avctx = ctx;
    VTEncContext *vtctx = avctx->priv_data;
    ExtraSEI *sei = sourceFrameCtx;

    /* Once an async error is latched, drop everything that still arrives. */
    if (vtctx->async_error) {
        if(sample_buffer) CFRelease(sample_buffer);
        return;
    }

    if (status || !sample_buffer) {
        av_log(avctx, AV_LOG_ERROR, "Error encoding frame: %d\n", (int)status);
        /* NOTE(review): a line is missing here in this view -- expected
         * set_async_error(vtctx, AVERROR_EXTERNAL); confirm in the original. */
        return;
    }

    if (!avctx->extradata && (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
        int set_status = set_extradata(avctx, sample_buffer);
        if (set_status) {
            set_async_error(vtctx, set_status);
            return;
        }
    }

    vtenc_q_push(vtctx, sample_buffer, sei);
}
467 
/* NOTE(review): the opening line of this function (expected
 * "static int get_length_code_size(") was lost in extraction.
 *
 * Queries the NAL-unit length-prefix size (in bytes) used by the sample
 * buffer's AVCC-formatted stream and stores it in *size. */
        AVCodecContext *avctx,
        CMSampleBufferRef sample_buffer,
        size_t *size)
{
    CMVideoFormatDescriptionRef vid_fmt;
    int isize;  /* API reports the header length as an int */
    int status;

    vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
    if (!vid_fmt) {
        av_log(avctx, AV_LOG_ERROR, "Error getting buffer format description.\n");
        return AVERROR_EXTERNAL;
    }

    status = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(vid_fmt,
                                                                0,
                                                                NULL,
                                                                NULL,
                                                                NULL,
                                                                &isize);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error getting length code size: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    *size = isize;
    return 0;
}
497 
498 /*
499  * Returns true on success.
500  *
501  * If profile_level_val is NULL and this method returns true, don't specify the
502  * profile/level to the encoder.
503  */
/* NOTE(review): the opening line of this function (expected
 * "static bool get_vt_profile_level(AVCodecContext *avctx,") was lost in
 * extraction.
 *
 * Maps the user's profile/level options to a kVTProfileLevel_* constant.
 * Returns true on success; *profile_level_val == NULL with a true return
 * means "let the encoder pick". */
                                 CFStringRef *profile_level_val)
{
    VTEncContext *vtctx = avctx->priv_data;
    int64_t profile = vtctx->profile;

    if (profile == H264_PROF_AUTO && vtctx->level) {
        //Need to pick a profile if level is not auto-selected.
        profile = vtctx->has_b_frames ? H264_PROF_MAIN : H264_PROF_BASELINE;
    }

    *profile_level_val = NULL;

    /* Level values are encoded as major*10 + minor (e.g. 31 == level 3.1);
     * 0 selects the AutoLevel variant. */
    switch (profile) {
        case H264_PROF_AUTO:
            return true;

        case H264_PROF_BASELINE:
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_Baseline_AutoLevel; break;
                case 13: *profile_level_val = kVTProfileLevel_H264_Baseline_1_3;       break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Baseline_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Baseline_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Baseline_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Baseline_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Baseline_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_Baseline_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Baseline_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_Baseline_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_Baseline_5_2;       break;
            }
            break;

        case H264_PROF_MAIN:
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_Main_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_Main_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_Main_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_Main_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_Main_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_Main_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_Main_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_Main_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_Main_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_Main_5_2;       break;
            }
            break;

        case H264_PROF_HIGH:
            switch (vtctx->level) {
                case  0: *profile_level_val = kVTProfileLevel_H264_High_AutoLevel; break;
                case 30: *profile_level_val = kVTProfileLevel_H264_High_3_0;       break;
                case 31: *profile_level_val = kVTProfileLevel_H264_High_3_1;       break;
                case 32: *profile_level_val = kVTProfileLevel_H264_High_3_2;       break;
                case 40: *profile_level_val = kVTProfileLevel_H264_High_4_0;       break;
                case 41: *profile_level_val = kVTProfileLevel_H264_High_4_1;       break;
                case 42: *profile_level_val = kVTProfileLevel_H264_High_4_2;       break;
                case 50: *profile_level_val = kVTProfileLevel_H264_High_5_0;       break;
                case 51: *profile_level_val = kVTProfileLevel_H264_High_5_1;       break;
                case 52: *profile_level_val = kVTProfileLevel_H264_High_5_2;       break;
            }
            break;
    }

    if (!*profile_level_val) {
        av_log(avctx, AV_LOG_ERROR, "Invalid Profile/Level.\n");
        return false;
    }

    return true;
}
575 
/* NOTE(review): the opening line of this function (expected
 * "static int get_cv_pixel_format(AVCodecContext* avctx,") was lost in
 * extraction.
 *
 * Maps an AVPixelFormat plus color range to a kCVPixelFormatType_* value.
 * *range_guessed (optional) is set when the caller supplied neither MPEG
 * nor JPEG range. Returns AVERROR(EINVAL) for unsupported formats. */
        enum AVPixelFormat fmt,
        enum AVColorRange range,
        int* av_pixel_format,
        int* range_guessed)
{
    if (range_guessed) *range_guessed = range != AVCOL_RANGE_MPEG &&
                                        range != AVCOL_RANGE_JPEG;

    //MPEG range is used when no range is set
    if (fmt == AV_PIX_FMT_NV12) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                               kCVPixelFormatType_420YpCbCr8BiPlanarFullRange :
                               kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
    } else if (fmt == AV_PIX_FMT_YUV420P) {
        *av_pixel_format = range == AVCOL_RANGE_JPEG ?
                               kCVPixelFormatType_420YpCbCr8PlanarFullRange :
                               kCVPixelFormatType_420YpCbCr8Planar;
    } else {
        return AVERROR(EINVAL);
    }

    return 0;
}
600 
601 static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict) {
602  VTEncContext *vtctx = avctx->priv_data;
603 
604  if (vtctx->color_primaries) {
605  CFDictionarySetValue(dict,
606  kCVImageBufferColorPrimariesKey,
607  vtctx->color_primaries);
608  }
609 
610  if (vtctx->transfer_function) {
611  CFDictionarySetValue(dict,
612  kCVImageBufferTransferFunctionKey,
613  vtctx->transfer_function);
614  }
615 
616  if (vtctx->ycbcr_matrix) {
617  CFDictionarySetValue(dict,
618  kCVImageBufferYCbCrMatrixKey,
619  vtctx->ycbcr_matrix);
620  }
621 }
622 
/* NOTE(review): the opening line of this function (expected
 * "static int create_cv_pixel_buffer_info(AVCodecContext* avctx,") was lost
 * in extraction.
 *
 * Builds the CVPixelBuffer attribute dictionary (pixel format, width,
 * height, color attributes) passed to VTCompressionSessionCreate.
 * On success the caller owns *dict and must CFRelease it. */
        CFMutableDictionaryRef* dict)
{
    CFNumberRef cv_color_format_num = NULL;
    CFNumberRef width_num = NULL;
    CFNumberRef height_num = NULL;
    CFMutableDictionaryRef pixel_buffer_info = NULL;
    int cv_color_format;
    int status = get_cv_pixel_format(avctx,
                                     avctx->pix_fmt,
                                     avctx->color_range,
                                     &cv_color_format,
                                     NULL);
    if (status) return status;

    pixel_buffer_info = CFDictionaryCreateMutable(
                            kCFAllocatorDefault,
                            20,
                            &kCFCopyStringDictionaryKeyCallBacks,
                            &kCFTypeDictionaryValueCallBacks);

    if (!pixel_buffer_info) goto pbinfo_nomem;

    cv_color_format_num = CFNumberCreate(kCFAllocatorDefault,
                                         kCFNumberSInt32Type,
                                         &cv_color_format);
    if (!cv_color_format_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferPixelFormatTypeKey,
                         cv_color_format_num);
    vt_release_num(&cv_color_format_num);

    width_num = CFNumberCreate(kCFAllocatorDefault,
                               kCFNumberSInt32Type,
                               &avctx->width);
    /* NOTE(review): this early return leaks pixel_buffer_info -- every other
     * failure path uses "goto pbinfo_nomem"; this one should too. */
    if (!width_num) return AVERROR(ENOMEM);

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferWidthKey,
                         width_num);
    vt_release_num(&width_num);

    height_num = CFNumberCreate(kCFAllocatorDefault,
                                kCFNumberSInt32Type,
                                &avctx->height);
    if (!height_num) goto pbinfo_nomem;

    CFDictionarySetValue(pixel_buffer_info,
                         kCVPixelBufferHeightKey,
                         height_num);
    vt_release_num(&height_num);

    add_color_attr(avctx, pixel_buffer_info);

    *dict = pixel_buffer_info;
    return 0;

pbinfo_nomem:
    /* vt_release_num is NULL-safe, so unconditional cleanup is fine. */
    vt_release_num(&cv_color_format_num);
    vt_release_num(&width_num);
    vt_release_num(&height_num);
    if (pixel_buffer_info) CFRelease(pixel_buffer_info);

    return AVERROR(ENOMEM);
}
689 
/* NOTE(review): the opening line of this function (expected
 * "static int get_cv_color_primaries(AVCodecContext *avctx,") was lost in
 * extraction, as were two lines inside the switch (presumably the
 * AVCOL_PRI_UNSPECIFIED case label and the BT.2020 assignment).
 *
 * Maps avctx->color_primaries to a kCVImageBufferColorPrimaries_* constant,
 * or NULL when unspecified. Returns -1 for unsupported values. */
        CFStringRef *primaries)
{
    enum AVColorPrimaries pri = avctx->color_primaries;
    switch (pri) {
            /* NOTE(review): case label missing here in this view. */
            *primaries = NULL;
            break;

        case AVCOL_PRI_BT709:
            *primaries = kCVImageBufferColorPrimaries_ITU_R_709_2;
            break;

        case AVCOL_PRI_BT2020:
            /* NOTE(review): assignment line missing here in this view. */
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color primaries %s is not supported.\n", av_color_primaries_name(pri));
            *primaries = NULL;
            return -1;
    }

    return 0;
}
715 
/* NOTE(review): the opening line of this function (expected
 * "static int get_cv_transfer_function(AVCodecContext *avctx,") was lost in
 * extraction, as were the AVCOL_TRC_UNSPECIFIED case label and the BT.2020
 * assignment line inside the switch.
 *
 * Maps avctx->color_trc to a kCVImageBufferTransferFunction_* constant;
 * for pure-gamma transfers a *gamma_level CFNumber is also created (caller
 * releases it). Returns -1 for unsupported characteristics. */
        CFStringRef *transfer_fnc,
        CFNumberRef *gamma_level)
{
    enum AVColorTransferCharacteristic trc = avctx->color_trc;
    Float32 gamma;
    *gamma_level = NULL;

    switch (trc) {
            /* NOTE(review): case label missing here in this view. */
            *transfer_fnc = NULL;
            break;

        case AVCOL_TRC_BT709:
            *transfer_fnc = kCVImageBufferTransferFunction_ITU_R_709_2;
            break;

        case AVCOL_TRC_SMPTE240M:
            *transfer_fnc = kCVImageBufferTransferFunction_SMPTE_240M_1995;
            break;

        case AVCOL_TRC_GAMMA22:
            gamma = 2.2;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_GAMMA28:
            gamma = 2.8;
            *transfer_fnc = kCVImageBufferTransferFunction_UseGamma;
            *gamma_level = CFNumberCreate(NULL, kCFNumberFloat32Type, &gamma);
            break;

        case AVCOL_TRC_BT2020_10:
        case AVCOL_TRC_BT2020_12:
            /* NOTE(review): assignment line missing here in this view. */
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Transfer function %s is not supported.\n", av_color_transfer_name(trc));
            return -1;
    }

    return 0;
}
761 
/* Maps avctx->colorspace to a kCVImageBufferYCbCrMatrix_* constant, or NULL
 * when unspecified. Returns -1 for unsupported color spaces.
 * NOTE(review): the AVCOL_SPC_UNSPECIFIED case label and the BT.2020 case
 * (label plus assignment) appear to have been lost in extraction. */
static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix) {
    switch(avctx->colorspace) {
        case AVCOL_SPC_BT709:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_709_2;
            break;

            /* NOTE(review): case label missing here in this view. */
            *matrix = NULL;
            break;

        case AVCOL_SPC_BT470BG:
        case AVCOL_SPC_SMPTE170M:
            *matrix = kCVImageBufferYCbCrMatrix_ITU_R_601_4;
            break;

        case AVCOL_SPC_SMPTE240M:
            *matrix = kCVImageBufferYCbCrMatrix_SMPTE_240M_1995;
            break;

            /* NOTE(review): case label and assignment missing here in this view. */
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Color space %s is not supported.\n", av_color_space_name(avctx->colorspace));
            return -1;
    }

    return 0;
}
792 
/* NOTE(review): the opening line of this function (expected
 * "static int vtenc_create_encoder(AVCodecContext *avctx," with the name
 * vtenc_create_encoder, as called from vtenc_configure_encoder) was lost in
 * extraction.
 *
 * Creates the VTCompressionSession and applies encoder properties: average
 * bitrate, profile/level, keyframe interval, frames before/after, sample
 * aspect ratio, color metadata, gamma, frame reordering, entropy mode and
 * realtime mode, then prepares the session for encoding.
 *
 * Returns 0 on success; AVERROR(ENOMEM) when a CF object cannot be created;
 * AVERROR_EXTERNAL on VideoToolbox failures. */
                                CMVideoCodecType codec_type,
                                CFStringRef profile_level,
                                CFNumberRef gamma_level,
                                CFDictionaryRef enc_info,
                                CFDictionaryRef pixel_buffer_info,
                                VTCompressionSessionRef *session)
{
    VTEncContext *vtctx = avctx->priv_data;
    /* NOTE(review): avctx->bit_rate is 64-bit; assigning to SInt32 truncates
     * rates above INT32_MAX -- confirm acceptable. */
    SInt32 bit_rate = avctx->bit_rate;
    CFNumberRef bit_rate_num;

    int status = VTCompressionSessionCreate(kCFAllocatorDefault,
                                            avctx->width,
                                            avctx->height,
                                            codec_type,
                                            enc_info,
                                            pixel_buffer_info,
                                            kCFAllocatorDefault,
                                            /* NOTE(review): the output-callback
                                             * argument line was lost in
                                             * extraction here. */
                                            avctx,
                                            session);

    if (status || !vtctx->session) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot create compression session: %d\n", status);

#if !TARGET_OS_IPHONE
        if (!vtctx->allow_sw) {
            av_log(avctx, AV_LOG_ERROR, "Try -allow_sw 1. The hardware encoder may be busy, or not supported.\n");
        }
#endif

        return AVERROR_EXTERNAL;
    }

    bit_rate_num = CFNumberCreate(kCFAllocatorDefault,
                                  kCFNumberSInt32Type,
                                  &bit_rate);
    if (!bit_rate_num) return AVERROR(ENOMEM);

    status = VTSessionSetProperty(vtctx->session,
                                  kVTCompressionPropertyKey_AverageBitRate,
                                  bit_rate_num);
    CFRelease(bit_rate_num);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error setting bitrate property: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    if (profile_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ProfileLevel,
                                      profile_level);
        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting profile/level property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (avctx->gop_size > 0) {
        CFNumberRef interval = CFNumberCreate(kCFAllocatorDefault,
                                              kCFNumberIntType,
                                              &avctx->gop_size);
        if (!interval) {
            return AVERROR(ENOMEM);
        }

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MaxKeyFrameInterval,
                                      interval);
        CFRelease(interval);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'max key-frame interval' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    /* frames_before/frames_after are best-effort: unsupported devices only warn. */
    if (vtctx->frames_before) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesBeforeStart,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_before property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_before property: %d\n", status);
        }
    }

    if (vtctx->frames_after) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_MoreFramesAfterEnd,
                                      kCFBooleanTrue);

        if (status == kVTPropertyNotSupportedErr) {
            av_log(avctx, AV_LOG_WARNING, "frames_after property is not supported on this device. Ignoring.\n");
        } else if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting frames_after property: %d\n", status);
        }
    }

    if (avctx->sample_aspect_ratio.num != 0) {
        CFNumberRef num;
        CFNumberRef den;
        CFMutableDictionaryRef par;
        AVRational *avpar = &avctx->sample_aspect_ratio;

        /* Reduce in place so the session sees the smallest terms. */
        av_reduce(&avpar->num, &avpar->den,
                   avpar->num,  avpar->den,
                  0xFFFFFFFF);

        num = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->num);

        den = CFNumberCreate(kCFAllocatorDefault,
                             kCFNumberIntType,
                             &avpar->den);

        par = CFDictionaryCreateMutable(kCFAllocatorDefault,
                                        2,
                                        &kCFCopyStringDictionaryKeyCallBacks,
                                        &kCFTypeDictionaryValueCallBacks);

        if (!par || !num || !den) {
            if (par) CFRelease(par);
            if (num) CFRelease(num);
            if (den) CFRelease(den);

            return AVERROR(ENOMEM);
        }

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioHorizontalSpacing,
            num);

        CFDictionarySetValue(
            par,
            kCMFormatDescriptionKey_PixelAspectRatioVerticalSpacing,
            den);

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_PixelAspectRatio,
                                      par);

        CFRelease(par);
        CFRelease(num);
        CFRelease(den);

        if (status) {
            av_log(avctx,
                   AV_LOG_ERROR,
                   "Error setting pixel aspect ratio to %d:%d: %d.\n",
                   avctx->sample_aspect_ratio.num,
                   avctx->sample_aspect_ratio.den,
                   status);

            return AVERROR_EXTERNAL;
        }
    }

    /* Color metadata failures are non-fatal: warn and continue. */
    if (vtctx->transfer_function) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_TransferFunction,
                                      vtctx->transfer_function);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set transfer function: %d\n", status);
        }
    }

    if (vtctx->ycbcr_matrix) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_YCbCrMatrix,
                                      vtctx->ycbcr_matrix);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set ycbcr matrix: %d\n", status);
        }
    }

    if (vtctx->color_primaries) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_ColorPrimaries,
                                      vtctx->color_primaries);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set color primaries: %d\n", status);
        }
    }

    if (gamma_level) {
        status = VTSessionSetProperty(vtctx->session,
                                      kCVImageBufferGammaLevelKey,
                                      gamma_level);

        if (status) {
            av_log(avctx, AV_LOG_WARNING, "Could not set gamma level: %d\n", status);
        }
    }

    if (!vtctx->has_b_frames) {
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_AllowFrameReordering,
                                      kCFBooleanFalse);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting 'allow frame reordering' property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->entropy != VT_ENTROPY_NOT_SET) {
        CFStringRef entropy = vtctx->entropy == VT_CABAC ?
                                kVTH264EntropyMode_CABAC:
                                kVTH264EntropyMode_CAVLC;

        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_H264EntropyMode,
                                      entropy);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting entropy property: %d\n", status);
            return AVERROR_EXTERNAL;
        }
    }

    if (vtctx->realtime) {
        /* Failure to enable realtime mode is logged but not fatal. */
        status = VTSessionSetProperty(vtctx->session,
                                      kVTCompressionPropertyKey_RealTime,
                                      kCFBooleanTrue);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error setting realtime property: %d\n", status);
        }
    }

    status = VTCompressionSessionPrepareToEncodeFrames(vtctx->session);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot prepare encoder: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
1046 
/* NOTE(review): the signature line of this function (expected
 * "static int vtenc_configure_encoder(AVCodecContext *avctx)") was lost in
 * extraction.
 *
 * Encoder-init path: validates profile/entropy options against the codec,
 * builds the session/pixel-buffer dictionaries, initializes synchronization
 * state and color metadata, optionally populates global-header extradata,
 * then creates the compression session. */
{
    CFMutableDictionaryRef enc_info;
    CFMutableDictionaryRef pixel_buffer_info;
    CMVideoCodecType codec_type;
    VTEncContext *vtctx = avctx->priv_data;
    CFStringRef profile_level;
    CFBooleanRef has_b_frames_cfbool;
    CFNumberRef gamma_level = NULL;
    int status;

    codec_type = get_cm_codec_type(avctx->codec_id);
    if (!codec_type) {
        av_log(avctx, AV_LOG_ERROR, "Error: no mapping for AVCodecID %d\n", avctx->codec_id);
        return AVERROR(EINVAL);
    }

    /* Baseline profile supports neither B-frames nor CABAC; downgrade with a warning. */
    vtctx->has_b_frames = avctx->max_b_frames > 0;
    if(vtctx->has_b_frames && vtctx->profile == H264_PROF_BASELINE){
        av_log(avctx, AV_LOG_WARNING, "Cannot use B-frames with baseline profile. Output will not contain B-frames.\n");
        vtctx->has_b_frames = false;
    }

    if (vtctx->entropy == VT_CABAC && vtctx->profile == H264_PROF_BASELINE) {
        av_log(avctx, AV_LOG_WARNING, "CABAC entropy requires 'main' or 'high' profile, but baseline was requested. Encode will not use CABAC entropy.\n");
        vtctx->entropy = VT_ENTROPY_NOT_SET;
    }

    if (!get_vt_profile_level(avctx, &profile_level)) return AVERROR(EINVAL);

    vtctx->session = NULL;

    enc_info = CFDictionaryCreateMutable(
        kCFAllocatorDefault,
        20,
        &kCFCopyStringDictionaryKeyCallBacks,
        &kCFTypeDictionaryValueCallBacks
    );

    if (!enc_info) return AVERROR(ENOMEM);

#if !TARGET_OS_IPHONE
    if (!vtctx->allow_sw) {
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_RequireHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    } else {
        CFDictionarySetValue(enc_info, kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, kCFBooleanTrue);
    }
#endif

    /* AV_PIX_FMT_VIDEOTOOLBOX means the caller supplies CVPixelBuffers directly. */
    if (avctx->pix_fmt != AV_PIX_FMT_VIDEOTOOLBOX) {
        status = create_cv_pixel_buffer_info(avctx, &pixel_buffer_info);
        if (status)
            goto init_cleanup;
    } else {
        pixel_buffer_info = NULL;
    }

    pthread_mutex_init(&vtctx->lock, NULL);
    /* NOTE(review): a line is missing here in this view -- expected
     * pthread_cond_init(&vtctx->cv_sample_sent, NULL); vtenc_q_pop() waits
     * on that condition variable. Confirm against the original file. */
    vtctx->dts_delta = vtctx->has_b_frames ? -1 : 0;

    /* Best-effort: these can fail for unsupported values without aborting init. */
    get_cv_transfer_function(avctx, &vtctx->transfer_function, &gamma_level);
    get_cv_ycbcr_matrix(avctx, &vtctx->ycbcr_matrix);
    get_cv_color_primaries(avctx, &vtctx->color_primaries);

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        status = vtenc_populate_extradata(avctx,
                                          codec_type,
                                          profile_level,
                                          gamma_level,
                                          enc_info,
                                          pixel_buffer_info);
        if (status)
            goto init_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);

    if (status < 0)
        goto init_cleanup;

    status = VTSessionCopyProperty(vtctx->session,
                                   kVTCompressionPropertyKey_AllowFrameReordering,
                                   kCFAllocatorDefault,
                                   &has_b_frames_cfbool);

    if (!status) {
        //Some devices don't output B-frames for main profile, even if requested.
        vtctx->has_b_frames = CFBooleanGetValue(has_b_frames_cfbool);
        CFRelease(has_b_frames_cfbool);
    }
    avctx->has_b_frames = vtctx->has_b_frames;

init_cleanup:
    if (gamma_level)
        CFRelease(gamma_level);

    if (pixel_buffer_info)
        CFRelease(pixel_buffer_info);

    CFRelease(enc_info);

    return status;
}
1158 
1159 static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
1160 {
1161  CFArrayRef attachments;
1162  CFDictionaryRef attachment;
1163  CFBooleanRef not_sync;
1164  CFIndex len;
1165 
1166  attachments = CMSampleBufferGetSampleAttachmentsArray(buffer, false);
1167  len = !attachments ? 0 : CFArrayGetCount(attachments);
1168 
1169  if (!len) {
1170  *is_key_frame = true;
1171  return;
1172  }
1173 
1174  attachment = CFArrayGetValueAtIndex(attachments, 0);
1175 
1176  if (CFDictionaryGetValueIfPresent(attachment,
1177  kCMSampleAttachmentKey_NotSync,
1178  (const void **)&not_sync))
1179  {
1180  *is_key_frame = !CFBooleanGetValue(not_sync);
1181  } else {
1182  *is_key_frame = true;
1183  }
1184 }
1185 
1186 static int is_post_sei_nal_type(int nal_type){
1187  return nal_type != H264_NAL_SEI &&
1188  nal_type != H264_NAL_SPS &&
1189  nal_type != H264_NAL_PPS &&
1190  nal_type != H264_NAL_AUD;
1191 }
1192 
1193 /*
1194  * Finds the sei message start/size of type find_sei_type.
1195  * If more than one of that type exists, the last one is returned.
1196  */
1197 static int find_sei_end(AVCodecContext *avctx,
1198  uint8_t *nal_data,
1199  size_t nal_size,
1200  uint8_t **sei_end)
1201 {
1202  int nal_type;
1203  size_t sei_payload_size = 0;
1204  int sei_payload_type = 0;
1205  *sei_end = NULL;
1206  uint8_t *nal_start = nal_data;
1207 
1208  if (!nal_size)
1209  return 0;
1210 
1211  nal_type = *nal_data & 0x1F;
1212  if (nal_type != H264_NAL_SEI)
1213  return 0;
1214 
1215  nal_data++;
1216  nal_size--;
1217 
1218  if (nal_data[nal_size - 1] == 0x80)
1219  nal_size--;
1220 
1221  while (nal_size > 0 && *nal_data > 0) {
1222  do{
1223  sei_payload_type += *nal_data;
1224  nal_data++;
1225  nal_size--;
1226  } while (nal_size > 0 && *nal_data == 0xFF);
1227 
1228  if (!nal_size) {
1229  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing type.\n");
1230  return AVERROR_INVALIDDATA;
1231  }
1232 
1233  do{
1234  sei_payload_size += *nal_data;
1235  nal_data++;
1236  nal_size--;
1237  } while (nal_size > 0 && *nal_data == 0xFF);
1238 
1239  if (nal_size < sei_payload_size) {
1240  av_log(avctx, AV_LOG_ERROR, "Unexpected end of SEI NAL Unit parsing size.\n");
1241  return AVERROR_INVALIDDATA;
1242  }
1243 
1244  nal_data += sei_payload_size;
1245  nal_size -= sei_payload_size;
1246  }
1247 
1248  *sei_end = nal_data;
1249 
1250  return nal_data - nal_start + 1;
1251 }
1252 
1253 /**
1254  * Copies the data inserting emulation prevention bytes as needed.
1255  * Existing data in the destination can be taken into account by providing
1256  * dst with a dst_offset > 0.
1257  *
1258  * @return The number of bytes copied on success. On failure, the negative of
1259  * the number of bytes needed to copy src is returned.
1260  */
/**
 * Copies the data inserting emulation prevention bytes as needed.
 * Existing data in the destination can be taken into account by providing
 * dst with a dst_offset > 0.
 *
 * @return The number of bytes copied on success. On failure, the negative of
 * the number of bytes needed to copy src is returned.
 */
static int copy_emulation_prev(const uint8_t *src,
                               size_t src_size,
                               uint8_t *dst,
                               ssize_t dst_offset,
                               size_t dst_size)
{
    int zero_run = 0;
    int written;
    uint8_t *out;
    uint8_t *out_start;
    uint8_t *dst_end = dst + dst_size;
    const uint8_t *src_end = src + src_size;
    int i = dst_offset > 2 ? dst_offset - 2 : 0;

    /* Seed the zero-run counter from the (up to two) bytes already written
     * before dst_offset, so an escape spanning the boundary is detected. */
    for (; i < dst_offset && i < dst_size; i++)
        zero_run = dst[i] ? 0 : zero_run + 1;

    out       = dst + dst_offset;
    out_start = out;

    for (; src < src_end; src++, out++) {
        /* After two zero bytes, a source byte of 0x00..0x03 needs an
         * emulation prevention byte (0x03) inserted first. */
        if (zero_run == 2) {
            if (*src <= 3) {
                if (out < dst_end)
                    *out = 3;
                out++;
            }
            zero_run = 0;
        }

        /* Bytes past dst_end are counted but not stored, so the caller can
         * learn the required size from the negative return value. */
        if (out < dst_end)
            *out = *src;

        zero_run = *src ? 0 : zero_run + 1;
    }

    written = out - out_start;

    return out > dst_end ? -written : written;
}
1311 
1312 static int write_sei(const ExtraSEI *sei,
1313  int sei_type,
1314  uint8_t *dst,
1315  size_t dst_size)
1316 {
1317  uint8_t *sei_start = dst;
1318  size_t remaining_sei_size = sei->size;
1319  size_t remaining_dst_size = dst_size;
1320  int header_bytes;
1321  int bytes_written;
1322  ssize_t offset;
1323 
1324  if (!remaining_dst_size)
1325  return AVERROR_BUFFER_TOO_SMALL;
1326 
1327  while (sei_type && remaining_dst_size != 0) {
1328  int sei_byte = sei_type > 255 ? 255 : sei_type;
1329  *dst = sei_byte;
1330 
1331  sei_type -= sei_byte;
1332  dst++;
1333  remaining_dst_size--;
1334  }
1335 
1336  if (!dst_size)
1337  return AVERROR_BUFFER_TOO_SMALL;
1338 
1339  while (remaining_sei_size && remaining_dst_size != 0) {
1340  int size_byte = remaining_sei_size > 255 ? 255 : remaining_sei_size;
1341  *dst = size_byte;
1342 
1343  remaining_sei_size -= size_byte;
1344  dst++;
1345  remaining_dst_size--;
1346  }
1347 
1348  if (remaining_dst_size < sei->size)
1349  return AVERROR_BUFFER_TOO_SMALL;
1350 
1351  header_bytes = dst - sei_start;
1352 
1353  offset = header_bytes;
1354  bytes_written = copy_emulation_prev(sei->data,
1355  sei->size,
1356  sei_start,
1357  offset,
1358  dst_size);
1359  if (bytes_written < 0)
1360  return AVERROR_BUFFER_TOO_SMALL;
1361 
1362  bytes_written += header_bytes;
1363  return bytes_written;
1364 }
1365 
1366 /**
1367  * Copies NAL units and replaces length codes with
1368  * H.264 Annex B start codes. On failure, the contents of
1369  * dst_data may have been modified.
1370  *
1371  * @param length_code_size Byte length of each length code
1372  * @param sample_buffer NAL units prefixed with length codes.
1373  * @param sei Optional A53 closed captions SEI data.
1374  * @param dst_data Must be zeroed before calling this function.
1375  * Contains the copied NAL units prefixed with
1376  * start codes when the function returns
1377  * successfully.
1378  * @param dst_size Length of dst_data
1379  * @return 0 on success
1380  * AVERROR_INVALIDDATA if length_code_size is invalid
1381  * AVERROR_BUFFER_TOO_SMALL if dst_data is too small
1382  * or if a length_code in src_data specifies data beyond
1383  * the end of its buffer.
1384  */
1386  AVCodecContext *avctx,
1387  size_t length_code_size,
1388  CMSampleBufferRef sample_buffer,
1389  ExtraSEI *sei,
1390  uint8_t *dst_data,
1391  size_t dst_size)
1392 {
1393  size_t src_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
1394  size_t remaining_src_size = src_size;
1395  size_t remaining_dst_size = dst_size;
1396  size_t src_offset = 0;
1397  int wrote_sei = 0;
1398  int status;
1399  uint8_t size_buf[4];
1400  uint8_t nal_type;
1401  CMBlockBufferRef block = CMSampleBufferGetDataBuffer(sample_buffer);
1402 
1403  if (length_code_size > 4) {
1404  return AVERROR_INVALIDDATA;
1405  }
1406 
1407  while (remaining_src_size > 0) {
1408  size_t curr_src_len;
1409  size_t curr_dst_len;
1410  size_t box_len = 0;
1411  size_t i;
1412 
1413  uint8_t *dst_box;
1414 
1415  status = CMBlockBufferCopyDataBytes(block,
1416  src_offset,
1417  length_code_size,
1418  size_buf);
1419  if (status) {
1420  av_log(avctx, AV_LOG_ERROR, "Cannot copy length: %d\n", status);
1421  return AVERROR_EXTERNAL;
1422  }
1423 
1424  status = CMBlockBufferCopyDataBytes(block,
1425  src_offset + length_code_size,
1426  1,
1427  &nal_type);
1428 
1429  if (status) {
1430  av_log(avctx, AV_LOG_ERROR, "Cannot copy type: %d\n", status);
1431  return AVERROR_EXTERNAL;
1432  }
1433 
1434  nal_type &= 0x1F;
1435 
1436  for (i = 0; i < length_code_size; i++) {
1437  box_len <<= 8;
1438  box_len |= size_buf[i];
1439  }
1440 
1441  if (sei && !wrote_sei && is_post_sei_nal_type(nal_type)) {
1442  //No SEI NAL unit - insert.
1443  int wrote_bytes;
1444 
1445  memcpy(dst_data, start_code, sizeof(start_code));
1446  dst_data += sizeof(start_code);
1447  remaining_dst_size -= sizeof(start_code);
1448 
1449  *dst_data = H264_NAL_SEI;
1450  dst_data++;
1451  remaining_dst_size--;
1452 
1453  wrote_bytes = write_sei(sei,
1455  dst_data,
1456  remaining_dst_size);
1457 
1458  if (wrote_bytes < 0)
1459  return wrote_bytes;
1460 
1461  remaining_dst_size -= wrote_bytes;
1462  dst_data += wrote_bytes;
1463 
1464  if (remaining_dst_size <= 0)
1465  return AVERROR_BUFFER_TOO_SMALL;
1466 
1467  *dst_data = 0x80;
1468 
1469  dst_data++;
1470  remaining_dst_size--;
1471 
1472  wrote_sei = 1;
1473  }
1474 
1475  curr_src_len = box_len + length_code_size;
1476  curr_dst_len = box_len + sizeof(start_code);
1477 
1478  if (remaining_src_size < curr_src_len) {
1479  return AVERROR_BUFFER_TOO_SMALL;
1480  }
1481 
1482  if (remaining_dst_size < curr_dst_len) {
1483  return AVERROR_BUFFER_TOO_SMALL;
1484  }
1485 
1486  dst_box = dst_data + sizeof(start_code);
1487 
1488  memcpy(dst_data, start_code, sizeof(start_code));
1489  status = CMBlockBufferCopyDataBytes(block,
1490  src_offset + length_code_size,
1491  box_len,
1492  dst_box);
1493 
1494  if (status) {
1495  av_log(avctx, AV_LOG_ERROR, "Cannot copy data: %d\n", status);
1496  return AVERROR_EXTERNAL;
1497  }
1498 
1499  if (sei && !wrote_sei && nal_type == H264_NAL_SEI) {
1500  //Found SEI NAL unit - append.
1501  int wrote_bytes;
1502  int old_sei_length;
1503  int extra_bytes;
1504  uint8_t *new_sei;
1505  old_sei_length = find_sei_end(avctx, dst_box, box_len, &new_sei);
1506  if (old_sei_length < 0)
1507  return status;
1508 
1509  wrote_bytes = write_sei(sei,
1511  new_sei,
1512  remaining_dst_size - old_sei_length);
1513  if (wrote_bytes < 0)
1514  return wrote_bytes;
1515 
1516  if (new_sei + wrote_bytes >= dst_data + remaining_dst_size)
1517  return AVERROR_BUFFER_TOO_SMALL;
1518 
1519  new_sei[wrote_bytes++] = 0x80;
1520  extra_bytes = wrote_bytes - (dst_box + box_len - new_sei);
1521 
1522  dst_data += extra_bytes;
1523  remaining_dst_size -= extra_bytes;
1524 
1525  wrote_sei = 1;
1526  }
1527 
1528  src_offset += curr_src_len;
1529  dst_data += curr_dst_len;
1530 
1531  remaining_src_size -= curr_src_len;
1532  remaining_dst_size -= curr_dst_len;
1533  }
1534 
1535  return 0;
1536 }
1537 
1538 /**
1539  * Returns a sufficient number of bytes to contain the sei data.
1540  * It may be greater than the minimum required.
1541  */
1542 static int get_sei_msg_bytes(const ExtraSEI* sei, int type){
1543  int copied_size;
1544  if (sei->size == 0)
1545  return 0;
1546 
1547  copied_size = -copy_emulation_prev(sei->data,
1548  sei->size,
1549  NULL,
1550  0,
1551  0);
1552 
1553  if ((sei->size % 255) == 0) //may result in an extra byte
1554  copied_size++;
1555 
1556  return copied_size + sei->size / 255 + 1 + type / 255 + 1;
1557 }
1558 
/* Converts one encoded CMSampleBuffer into an AVPacket: optionally prepends
 * parameter sets, inserts/extends the A53 SEI, rewrites length prefixes to
 * Annex B start codes, and fills in pts/dts. */
static int vtenc_cm_to_avpacket(
    AVCodecContext *avctx,
    CMSampleBufferRef sample_buffer,
    AVPacket *pkt,
    ExtraSEI *sei)
{
    VTEncContext *vtctx = avctx->priv_data;

    int status;
    bool is_key_frame;
    bool add_header;
    size_t length_code_size;
    size_t header_size = 0;
    size_t in_buf_size;
    size_t out_buf_size;
    size_t sei_nalu_size = 0;
    int64_t dts_delta;
    int64_t time_base_num;
    int nalu_count;
    CMTime pts;
    CMTime dts;
    CMVideoFormatDescriptionRef vid_fmt;


    vtenc_get_frame_info(sample_buffer, &is_key_frame);
    status = get_length_code_size(avctx, sample_buffer, &length_code_size);
    if (status) return status;

    /* Parameter sets are prepended to keyframes only when they are not
     * delivered out-of-band through extradata. */
    add_header = is_key_frame && !(avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER);

    if (add_header) {
        vid_fmt = CMSampleBufferGetFormatDescription(sample_buffer);
        if (!vid_fmt) {
            av_log(avctx, AV_LOG_ERROR, "Cannot get format description.\n");
            return AVERROR_EXTERNAL;
        }

        /* NOTE(review): this inner "status" shadows the outer one; harmless
         * because it returns on error, but worth cleaning up. */
        int status = get_params_size(avctx, vid_fmt, &header_size);
        if (status) return status;
    }

    status = count_nalus(length_code_size, sample_buffer, &nalu_count);
    if(status)
        return status;

    if (sei) {
        size_t msg_size = get_sei_msg_bytes(sei,
                                            H264_SEI_TYPE_USER_DATA_REGISTERED);

        /* start code + NAL header byte + message + trailing 0x80 */
        sei_nalu_size = sizeof(start_code) + 1 + msg_size + 1;
    }

    /* Output budget: headers + sample data with each length prefix replaced
     * by a (possibly longer) start code, plus the optional SEI NAL unit. */
    in_buf_size = CMSampleBufferGetTotalSampleSize(sample_buffer);
    out_buf_size = header_size +
                   in_buf_size +
                   sei_nalu_size +
                   nalu_count * ((int)sizeof(start_code) - (int)length_code_size);

    status = ff_alloc_packet2(avctx, pkt, out_buf_size, out_buf_size);
    if (status < 0)
        return status;

    if (add_header) {
        status = copy_param_sets(avctx, vid_fmt, pkt->data, out_buf_size);
        if(status) return status;
    }

    status = copy_replace_length_codes(
        avctx,
        length_code_size,
        sample_buffer,
        sei,
        pkt->data + header_size,
        pkt->size - header_size
    );

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error copying packet data: %d\n", status);
        return status;
    }

    if (is_key_frame) {
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pts = CMSampleBufferGetPresentationTimeStamp(sample_buffer);
    dts = CMSampleBufferGetDecodeTimeStamp      (sample_buffer);

    /* Without B-frames DTS mirrors PTS; with B-frames an invalid DTS is fatal. */
    if (CMTIME_IS_INVALID(dts)) {
        if (!vtctx->has_b_frames) {
            dts = pts;
        } else {
            av_log(avctx, AV_LOG_ERROR, "DTS is invalid.\n");
            return AVERROR_EXTERNAL;
        }
    }

    /* dts_delta shifts DTS down so it never exceeds PTS under reordering.
     * NOTE(review): divisions assume time_base.num != 0 — confirm init
     * guarantees this. */
    dts_delta = vtctx->dts_delta >= 0 ? vtctx->dts_delta : 0;
    time_base_num = avctx->time_base.num;
    pkt->pts = pts.value / time_base_num;
    pkt->dts = dts.value / time_base_num - dts_delta;
    pkt->size = out_buf_size;

    return 0;
}
1664 
1665 /*
1666  * contiguous_buf_size is 0 if not contiguous, and the size of the buffer
1667  * containing all planes if so.
1668  */
/* Maps an AVFrame's format to a CoreVideo pixel format and fills per-plane
 * width/height/stride arrays. contiguous_buf_size is set to the total size
 * when the planes are laid out back-to-back in memory, 0 otherwise. */
static int get_cv_pixel_info(
    AVCodecContext *avctx,
    const AVFrame *frame,
    int *color,
    int *plane_count,
    size_t *widths,
    size_t *heights,
    size_t *strides,
    size_t *contiguous_buf_size)
{
    VTEncContext *vtctx = avctx->priv_data;
    int av_format = frame->format;
    int av_color_range = av_frame_get_color_range(frame);
    int i;
    int range_guessed;
    int status;

    status = get_cv_pixel_format(avctx, av_format, av_color_range, color, &range_guessed);
    if (status) {
        av_log(avctx,
               AV_LOG_ERROR,
               "Could not get pixel format for color format '%s' range '%s'.\n",
               av_get_pix_fmt_name(av_format),
               av_color_range > AVCOL_RANGE_UNSPECIFIED &&
               av_color_range < AVCOL_RANGE_NB ?
                   av_color_range_name(av_color_range) :
                   "Unknown");

        return AVERROR(EINVAL);
    }

    /* Warn only once per context when the range had to be guessed. */
    if (range_guessed) {
        if (!vtctx->warned_color_range) {
            vtctx->warned_color_range = true;
            av_log(avctx,
                   AV_LOG_WARNING,
                   "Color range not set for %s. Using MPEG range.\n",
                   av_get_pix_fmt_name(av_format));
        }

        /* NOTE(review): empty log message — looks like leftover debris,
         * confirm and remove. */
        av_log(avctx, AV_LOG_WARNING, "");
    }

    switch (av_format) {
    case AV_PIX_FMT_NV12:
        *plane_count = 2;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        /* NOTE(review): frame is already dereferenced above, so the
         * "frame ?" fallbacks below can never take the else branch. */
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) & -2;
        break;

    case AV_PIX_FMT_YUV420P:
        *plane_count = 3;

        widths [0] = avctx->width;
        heights[0] = avctx->height;
        strides[0] = frame ? frame->linesize[0] : avctx->width;

        widths [1] = (avctx->width  + 1) / 2;
        heights[1] = (avctx->height + 1) / 2;
        strides[1] = frame ? frame->linesize[1] : (avctx->width + 1) / 2;

        widths [2] = (avctx->width  + 1) / 2;
        heights[2] = (avctx->height + 1) / 2;
        strides[2] = frame ? frame->linesize[2] : (avctx->width + 1) / 2;
        break;

    default:
        av_log(
            avctx,
            AV_LOG_ERROR,
            "Could not get frame format info for color %d range %d.\n",
            av_format,
            av_color_range);

        return AVERROR(EINVAL);
    }

    /* Contiguity check: each plane must end exactly where the next begins;
     * any gap resets the size to 0 and stops. */
    *contiguous_buf_size = 0;
    for (i = 0; i < *plane_count; i++) {
        if (i < *plane_count - 1 &&
            frame->data[i] + strides[i] * heights[i] != frame->data[i + 1]) {
            *contiguous_buf_size = 0;
            break;
        }

        *contiguous_buf_size += strides[i] * heights[i];
    }

    return 0;
}
1765 
1766 #if !TARGET_OS_IPHONE
1767 //Not used on iOS - frame is always copied.
1768 static void free_avframe(
1769  void *release_ctx,
1770  const void *data,
1771  size_t size,
1772  size_t plane_count,
1773  const void *plane_addresses[])
1774 {
1775  AVFrame *frame = release_ctx;
1776  av_frame_free(&frame);
1777 }
1778 #else
1779 //Not used on OSX - frame is never copied.
1780 static int copy_avframe_to_pixel_buffer(AVCodecContext *avctx,
1781  const AVFrame *frame,
1782  CVPixelBufferRef cv_img,
1783  const size_t *plane_strides,
1784  const size_t *plane_rows)
1785 {
1786  int i, j;
1787  size_t plane_count;
1788  int status;
1789  int rows;
1790  int src_stride;
1791  int dst_stride;
1792  uint8_t *src_addr;
1793  uint8_t *dst_addr;
1794  size_t copy_bytes;
1795 
1796  status = CVPixelBufferLockBaseAddress(cv_img, 0);
1797  if (status) {
1798  av_log(
1799  avctx,
1800  AV_LOG_ERROR,
1801  "Error: Could not lock base address of CVPixelBuffer: %d.\n",
1802  status
1803  );
1804  }
1805 
1806  if (CVPixelBufferIsPlanar(cv_img)) {
1807  plane_count = CVPixelBufferGetPlaneCount(cv_img);
1808  for (i = 0; frame->data[i]; i++) {
1809  if (i == plane_count) {
1810  CVPixelBufferUnlockBaseAddress(cv_img, 0);
1811  av_log(avctx,
1812  AV_LOG_ERROR,
1813  "Error: different number of planes in AVFrame and CVPixelBuffer.\n"
1814  );
1815 
1816  return AVERROR_EXTERNAL;
1817  }
1818 
1819  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cv_img, i);
1820  src_addr = (uint8_t*)frame->data[i];
1821  dst_stride = CVPixelBufferGetBytesPerRowOfPlane(cv_img, i);
1822  src_stride = plane_strides[i];
1823  rows = plane_rows[i];
1824 
1825  if (dst_stride == src_stride) {
1826  memcpy(dst_addr, src_addr, src_stride * rows);
1827  } else {
1828  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
1829 
1830  for (j = 0; j < rows; j++) {
1831  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
1832  }
1833  }
1834  }
1835  } else {
1836  if (frame->data[1]) {
1837  CVPixelBufferUnlockBaseAddress(cv_img, 0);
1838  av_log(avctx,
1839  AV_LOG_ERROR,
1840  "Error: different number of planes in AVFrame and non-planar CVPixelBuffer.\n"
1841  );
1842 
1843  return AVERROR_EXTERNAL;
1844  }
1845 
1846  dst_addr = (uint8_t*)CVPixelBufferGetBaseAddress(cv_img);
1847  src_addr = (uint8_t*)frame->data[0];
1848  dst_stride = CVPixelBufferGetBytesPerRow(cv_img);
1849  src_stride = plane_strides[0];
1850  rows = plane_rows[0];
1851 
1852  if (dst_stride == src_stride) {
1853  memcpy(dst_addr, src_addr, src_stride * rows);
1854  } else {
1855  copy_bytes = dst_stride < src_stride ? dst_stride : src_stride;
1856 
1857  for (j = 0; j < rows; j++) {
1858  memcpy(dst_addr + j * dst_stride, src_addr + j * src_stride, copy_bytes);
1859  }
1860  }
1861  }
1862 
1863  status = CVPixelBufferUnlockBaseAddress(cv_img, 0);
1864  if (status) {
1865  av_log(avctx, AV_LOG_ERROR, "Error: Could not unlock CVPixelBuffer base address: %d.\n", status);
1866  return AVERROR_EXTERNAL;
1867  }
1868 
1869  return 0;
1870 }
1871 #endif //!TARGET_OS_IPHONE
1872 
1874  const AVFrame *frame,
1875  CVPixelBufferRef *cv_img)
1876 {
1877  int plane_count;
1878  int color;
1879  size_t widths [AV_NUM_DATA_POINTERS];
1880  size_t heights[AV_NUM_DATA_POINTERS];
1881  size_t strides[AV_NUM_DATA_POINTERS];
1882  int status;
1883  size_t contiguous_buf_size;
1884 #if TARGET_OS_IPHONE
1885  CVPixelBufferPoolRef pix_buf_pool;
1886  VTEncContext* vtctx = avctx->priv_data;
1887 #else
1888  CFMutableDictionaryRef pix_buf_attachments = CFDictionaryCreateMutable(
1889  kCFAllocatorDefault,
1890  10,
1891  &kCFCopyStringDictionaryKeyCallBacks,
1892  &kCFTypeDictionaryValueCallBacks);
1893 
1894  if (!pix_buf_attachments) return AVERROR(ENOMEM);
1895 #endif
1896 
1897  if (avctx->pix_fmt == AV_PIX_FMT_VIDEOTOOLBOX) {
1899 
1900  *cv_img = (CVPixelBufferRef)frame->data[3];
1901  av_assert0(*cv_img);
1902 
1903  CFRetain(*cv_img);
1904  return 0;
1905  }
1906 
1907  memset(widths, 0, sizeof(widths));
1908  memset(heights, 0, sizeof(heights));
1909  memset(strides, 0, sizeof(strides));
1910 
1911  status = get_cv_pixel_info(
1912  avctx,
1913  frame,
1914  &color,
1915  &plane_count,
1916  widths,
1917  heights,
1918  strides,
1919  &contiguous_buf_size
1920  );
1921 
1922  if (status) {
1923  av_log(
1924  avctx,
1925  AV_LOG_ERROR,
1926  "Error: Cannot convert format %d color_range %d: %d\n",
1927  frame->format,
1928  av_frame_get_color_range(frame),
1929  status
1930  );
1931 
1932  return AVERROR_EXTERNAL;
1933  }
1934 
1935 #if TARGET_OS_IPHONE
1936  pix_buf_pool = VTCompressionSessionGetPixelBufferPool(vtctx->session);
1937  if (!pix_buf_pool) {
1938  av_log(avctx, AV_LOG_ERROR, "Could not get pixel buffer pool.\n");
1939  return AVERROR_EXTERNAL;
1940  }
1941 
1942  status = CVPixelBufferPoolCreatePixelBuffer(NULL,
1943  pix_buf_pool,
1944  cv_img);
1945 
1946 
1947  if (status) {
1948  av_log(avctx, AV_LOG_ERROR, "Could not create pixel buffer from pool: %d.\n", status);
1949  return AVERROR_EXTERNAL;
1950  }
1951 
1952  status = copy_avframe_to_pixel_buffer(avctx, frame, *cv_img, strides, heights);
1953  if (status) {
1954  CFRelease(*cv_img);
1955  *cv_img = NULL;
1956  return status;
1957  }
1958 #else
1959  AVFrame *enc_frame = av_frame_alloc();
1960  if (!enc_frame) return AVERROR(ENOMEM);
1961 
1962  status = av_frame_ref(enc_frame, frame);
1963  if (status) {
1964  av_frame_free(&enc_frame);
1965  return status;
1966  }
1967 
1968  status = CVPixelBufferCreateWithPlanarBytes(
1969  kCFAllocatorDefault,
1970  enc_frame->width,
1971  enc_frame->height,
1972  color,
1973  NULL,
1974  contiguous_buf_size,
1975  plane_count,
1976  (void **)enc_frame->data,
1977  widths,
1978  heights,
1979  strides,
1980  free_avframe,
1981  enc_frame,
1982  NULL,
1983  cv_img
1984  );
1985 
1986  add_color_attr(avctx, pix_buf_attachments);
1987  CVBufferSetAttachments(*cv_img, pix_buf_attachments, kCVAttachmentMode_ShouldPropagate);
1988  CFRelease(pix_buf_attachments);
1989 
1990  if (status) {
1991  av_log(avctx, AV_LOG_ERROR, "Error: Could not create CVPixelBuffer: %d\n", status);
1992  return AVERROR_EXTERNAL;
1993  }
1994 #endif
1995 
1996  return 0;
1997 }
1998 
1999 static int create_encoder_dict_h264(const AVFrame *frame,
2000  CFDictionaryRef* dict_out)
2001 {
2002  CFDictionaryRef dict = NULL;
2003  if (frame->pict_type == AV_PICTURE_TYPE_I) {
2004  const void *keys[] = { kVTEncodeFrameOptionKey_ForceKeyFrame };
2005  const void *vals[] = { kCFBooleanTrue };
2006 
2007  dict = CFDictionaryCreate(NULL, keys, vals, 1, NULL, NULL);
2008  if(!dict) return AVERROR(ENOMEM);
2009  }
2010 
2011  *dict_out = dict;
2012  return 0;
2013 }
2014 
/* Submits one AVFrame to the VideoToolbox compression session, attaching
 * optional A53 closed-caption SEI data as the source frame refcon. */
static int vtenc_send_frame(AVCodecContext *avctx,
                            VTEncContext   *vtctx,
                            const AVFrame  *frame)
{
    CMTime time;
    CFDictionaryRef frame_dict;
    CVPixelBufferRef cv_img = NULL;
    AVFrameSideData *side_data = NULL;
    ExtraSEI *sei = NULL;
    int status = create_cv_pixel_buffer(avctx, frame, &cv_img);

    if (status) return status;

    status = create_encoder_dict_h264(frame, &frame_dict);
    if (status) {
        CFRelease(cv_img);
        return status;
    }

    /* Package A53 captions as SEI when present and enabled. Allocation
     * failure is non-fatal: captions are skipped, the frame still encodes. */
    side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC);
    if (vtctx->a53_cc && side_data && side_data->size) {
        sei = av_mallocz(sizeof(*sei));
        if (!sei) {
            av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
        } else {
            int ret = ff_alloc_a53_sei(frame, 0, &sei->data, &sei->size);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Not enough memory for closed captions, skipping\n");
                av_free(sei);
                sei = NULL;
            }
        }
    }

    time = CMTimeMake(frame->pts * avctx->time_base.num, avctx->time_base.den);
    /* sei is passed as the sourceFrameRefcon; ownership transfers to the
     * output callback. NOTE(review): if EncodeFrame fails the callback may
     * never fire, which would leak sei — confirm before "fixing". */
    status = VTCompressionSessionEncodeFrame(
        vtctx->session,
        cv_img,
        time,
        kCMTimeInvalid,
        frame_dict,
        sei,
        NULL
    );

    if (frame_dict) CFRelease(frame_dict);
    CFRelease(cv_img);

    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error: cannot encode frame: %d\n", status);
        return AVERROR_EXTERNAL;
    }

    return 0;
}
2070 
/* AVCodec.encode2 entry point: feeds frames to the session (flushing when
 * frame is NULL) and pops one finished sample from the output queue. */
static av_cold int vtenc_frame(
    AVCodecContext *avctx,
    AVPacket       *pkt,
    const AVFrame  *frame,
    int            *got_packet)
{
    VTEncContext *vtctx = avctx->priv_data;
    bool get_frame;
    int status;
    CMSampleBufferRef buf = NULL;
    ExtraSEI *sei = NULL;

    if (frame) {
        status = vtenc_send_frame(avctx, vtctx, frame);

        if (status) {
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }

        /* First two input PTS values establish dts_delta, the DTS shift
         * used when B-frames reorder output. */
        if (vtctx->frame_ct_in == 0) {
            vtctx->first_pts = frame->pts;
        } else if(vtctx->frame_ct_in == 1 && vtctx->has_b_frames) {
            vtctx->dts_delta = frame->pts - vtctx->first_pts;
        }

        vtctx->frame_ct_in++;
    } else if(!vtctx->flushing) {
        /* NULL frame: begin flushing; drain the session once. */
        vtctx->flushing = true;

        status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                    kCMTimeIndefinite);

        if (status) {
            av_log(avctx, AV_LOG_ERROR, "Error flushing frames: %d\n", status);
            status = AVERROR_EXTERNAL;
            goto end_nopkt;
        }
    }

    *got_packet = 0;
    /* Hold output until dts_delta is known, except when flushing. */
    get_frame = vtctx->dts_delta >= 0 || !frame;
    if (!get_frame) {
        status = 0;
        goto end_nopkt;
    }

    /* Block for a sample only while flushing (!frame). */
    status = vtenc_q_pop(vtctx, !frame, &buf, &sei);
    if (status) goto end_nopkt;
    if (!buf)   goto end_nopkt;

    status = vtenc_cm_to_avpacket(avctx, buf, pkt, sei);
    if (sei) {
        if (sei->data) av_free(sei->data);
        av_free(sei);
    }
    CFRelease(buf);
    if (status) goto end_nopkt;

    *got_packet = 1;
    return 0;

end_nopkt:
    av_packet_unref(pkt);
    return status;
}
2137 
/* Populates avctx->extradata by encoding one dummy black frame through a
 * throwaway session: flushing it makes the parameter sets available to the
 * output callback, which stores them as extradata. The temporary session is
 * always torn down before returning. */
static int vtenc_populate_extradata(AVCodecContext   *avctx,
                                    CMVideoCodecType codec_type,
                                    CFStringRef      profile_level,
                                    CFNumberRef      gamma_level,
                                    CFDictionaryRef  enc_info,
                                    CFDictionaryRef  pixel_buffer_info)
{
    VTEncContext *vtctx = avctx->priv_data;
    AVFrame *frame = av_frame_alloc();
    int y_size = avctx->width * avctx->height;
    int chroma_size = (avctx->width / 2) * (avctx->height / 2);
    CMSampleBufferRef buf = NULL;
    int status;

    if (!frame)
        return AVERROR(ENOMEM);

    /* One buffer holds luma plus both chroma planes back-to-back. */
    frame->buf[0] = av_buffer_alloc(y_size + 2 * chroma_size);

    if(!frame->buf[0]){
        status = AVERROR(ENOMEM);
        goto pe_cleanup;
    }

    status = vtenc_create_encoder(avctx,
                                  codec_type,
                                  profile_level,
                                  gamma_level,
                                  enc_info,
                                  pixel_buffer_info,
                                  &vtctx->session);
    if (status)
        goto pe_cleanup;

    /* Fill a black dummy frame: luma 0, chroma 128 (neutral). */
    frame->data[0] = frame->buf[0]->data;
    memset(frame->data[0], 0, y_size);

    /* NOTE(review): for NV12 the interleaved CbCr plane is 2*chroma_size
     * bytes, so this memset covers only half of it — harmless for a dummy
     * frame (buffer is large enough), but worth confirming. */
    frame->data[1] = frame->buf[0]->data + y_size;
    memset(frame->data[1], 128, chroma_size);


    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->data[2] = frame->buf[0]->data + y_size + chroma_size;
        memset(frame->data[2], 128, chroma_size);
    }

    frame->linesize[0] = avctx->width;

    if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        frame->linesize[1] =
        frame->linesize[2] = (avctx->width + 1) / 2;
    } else {
        frame->linesize[1] = (avctx->width + 1) / 2;
    }

    /* Mirror the real stream's color properties so the generated parameter
     * sets match what the session will produce later. */
    frame->format          = avctx->pix_fmt;
    frame->width           = avctx->width;
    frame->height          = avctx->height;
    av_frame_set_colorspace(frame, avctx->colorspace);
    av_frame_set_color_range(frame, avctx->color_range);
    frame->color_trc       = avctx->color_trc;
    frame->color_primaries = avctx->color_primaries;

    frame->pts = 0;
    status = vtenc_send_frame(avctx, vtctx, frame);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "Error sending frame: %d\n", status);
        goto pe_cleanup;
    }

    //Populates extradata - output frames are flushed and param sets are available.
    status = VTCompressionSessionCompleteFrames(vtctx->session,
                                                kCMTimeIndefinite);

    if (status)
        goto pe_cleanup;

    status = vtenc_q_pop(vtctx, 0, &buf, NULL);
    if (status) {
        av_log(avctx, AV_LOG_ERROR, "popping: %d\n", status);
        goto pe_cleanup;
    }

    CFRelease(buf);



pe_cleanup:
    if(vtctx->session)
        CFRelease(vtctx->session);

    vtctx->session = NULL;
    vtctx->frame_ct_out = 0;

    av_frame_unref(frame);
    av_frame_free(&frame);

    /* On success the output callback must have stored the extradata. */
    av_assert0(status != 0 || (avctx->extradata && avctx->extradata_size > 0));

    return status;
}
2239 
/* AVCodec.close: drains and releases the compression session, the frame
 * queue and its synchronization primitives, and the cached color CFStrings. */
static av_cold int vtenc_close(AVCodecContext *avctx)
{
    VTEncContext *vtctx = avctx->priv_data;

    /* Nothing to tear down if init failed before a session existed. */
    if(!vtctx->session) return 0;

    VTCompressionSessionCompleteFrames(vtctx->session,
                                       kCMTimeIndefinite);
    clear_frame_queue(vtctx);
    pthread_cond_destroy(&vtctx->cv_sample_sent);
    pthread_mutex_destroy(&vtctx->lock);
    CFRelease(vtctx->session);
    vtctx->session = NULL;

    if (vtctx->color_primaries) {
        CFRelease(vtctx->color_primaries);
        vtctx->color_primaries = NULL;
    }

    if (vtctx->transfer_function) {
        CFRelease(vtctx->transfer_function);
        vtctx->transfer_function = NULL;
    }

    if (vtctx->ycbcr_matrix) {
        CFRelease(vtctx->ycbcr_matrix);
        vtctx->ycbcr_matrix = NULL;
    }

    return 0;
}
2271 
/* Input pixel formats accepted by the encoder; the enumerator lines were
 * dropped by the HTML extraction and are reconstructed here. */
static const enum AVPixelFormat pix_fmts[] = {
    AV_PIX_FMT_VIDEOTOOLBOX,
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_NONE
};
2278 
#define OFFSET(x) offsetof(VTEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* User-visible encoder options. Levels are encoded as 10*level (0 = auto). */
static const AVOption options[] = {
    { "profile", "Profile", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = H264_PROF_AUTO }, H264_PROF_AUTO, H264_PROF_COUNT, VE, "profile" },
    { "baseline", "Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_BASELINE }, INT_MIN, INT_MAX, VE, "profile" },
    { "main",     "Main Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_MAIN     }, INT_MIN, INT_MAX, VE, "profile" },
    { "high",     "High Profile",     0, AV_OPT_TYPE_CONST, { .i64 = H264_PROF_HIGH     }, INT_MIN, INT_MAX, VE, "profile" },

    { "level", "Level", OFFSET(level), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, VE, "level" },
    { "1.3", "Level 1.3, only available with Baseline Profile", 0, AV_OPT_TYPE_CONST, { .i64 = 13 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.0", "Level 3.0", 0, AV_OPT_TYPE_CONST, { .i64 = 30 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.1", "Level 3.1", 0, AV_OPT_TYPE_CONST, { .i64 = 31 }, INT_MIN, INT_MAX, VE, "level" },
    { "3.2", "Level 3.2", 0, AV_OPT_TYPE_CONST, { .i64 = 32 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.0", "Level 4.0", 0, AV_OPT_TYPE_CONST, { .i64 = 40 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.1", "Level 4.1", 0, AV_OPT_TYPE_CONST, { .i64 = 41 }, INT_MIN, INT_MAX, VE, "level" },
    { "4.2", "Level 4.2", 0, AV_OPT_TYPE_CONST, { .i64 = 42 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.0", "Level 5.0", 0, AV_OPT_TYPE_CONST, { .i64 = 50 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.1", "Level 5.1", 0, AV_OPT_TYPE_CONST, { .i64 = 51 }, INT_MIN, INT_MAX, VE, "level" },
    { "5.2", "Level 5.2", 0, AV_OPT_TYPE_CONST, { .i64 = 52 }, INT_MIN, INT_MAX, VE, "level" },

    { "allow_sw", "Allow software encoding", OFFSET(allow_sw), AV_OPT_TYPE_BOOL,
        { .i64 = 0 }, 0, 1, VE },

    { "coder", "Entropy coding", OFFSET(entropy), AV_OPT_TYPE_INT, { .i64 = VT_ENTROPY_NOT_SET }, VT_ENTROPY_NOT_SET, VT_CABAC, VE, "coder" },
    { "cavlc", "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "vlc",   "CAVLC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CAVLC }, INT_MIN, INT_MAX, VE, "coder" },
    { "cabac", "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },
    { "ac",    "CABAC entropy coding", 0, AV_OPT_TYPE_CONST, { .i64 = VT_CABAC }, INT_MIN, INT_MAX, VE, "coder" },

    { "realtime", "Hint that encoding should happen in real-time if not faster (e.g. capturing from camera).",
        OFFSET(realtime), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { "frames_before", "Other frames will come before the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_before), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "frames_after", "Other frames will come after the frames in this session. This helps smooth concatenation issues.",
        OFFSET(frames_after), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

    { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, VE },

    { NULL },
};
2320 
/* AVClass backing the AVOptions above. */
static const AVClass h264_videotoolbox_class = {
    .class_name = "h264_videotoolbox",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
2327 
/* Codec registration. AV_CODEC_CAP_DELAY: output lags input (B-frame
 * reordering and session buffering), so the encoder must be flushed. */
AVCodec ff_h264_videotoolbox_encoder = {
    .name             = "h264_videotoolbox",
    .long_name        = NULL_IF_CONFIG_SMALL("VideoToolbox H.264 Encoder"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_H264,
    .priv_data_size   = sizeof(VTEncContext),
    .pix_fmts         = pix_fmts,
    .init             = vtenc_init,
    .encode2          = vtenc_frame,
    .close            = vtenc_close,
    .capabilities     = AV_CODEC_CAP_DELAY,
    .priv_class       = &h264_videotoolbox_class,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:48
ITU-R BT2020 for 12-bit system.
Definition: pixfmt.h:426
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:438
#define NULL
Definition: coverity.c:32
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static void vtenc_q_push(VTEncContext *vtctx, CMSampleBufferRef buffer, ExtraSEI *sei)
#define AV_NUM_DATA_POINTERS
Definition: frame.h:185
static CMVideoCodecType get_cm_codec_type(enum AVCodecID id)
static bool get_vt_profile_level(AVCodecContext *avctx, CFStringRef *profile_level_val)
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
pthread_cond_t cv_sample_sent
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
static void free_avframe(void *release_ctx, const void *data, size_t size, size_t plane_count, const void *plane_addresses[])
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:164
AVOption.
Definition: opt.h:245
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
BufNode * q_head
const char * fmt
Definition: avisynth_c.h:769
struct BufNode * next
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1741
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
enum AVColorRange av_frame_get_color_range(const AVFrame *frame)
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:367
hardware decoding through Videotoolbox
Definition: pixfmt.h:296
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static int set_extradata(AVCodecContext *avctx, CMSampleBufferRef sample_buffer)
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:1962
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:442
static int copy_param_sets(AVCodecContext *avctx, CMVideoFormatDescriptionRef vid_fmt, uint8_t *dst, size_t dst_size)
static int create_cv_pixel_buffer_info(AVCodecContext *avctx, CFMutableDictionaryRef *dict)
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2413
int num
Numerator.
Definition: rational.h:59
int size
Definition: avcodec.h:1602
static int write_sei(const ExtraSEI *sei, int sei_type, uint8_t *dst, size_t dst_size)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2087
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:443
enum AVMediaType codec_type
Definition: rtp.c:37
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1904
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
static AVPacket pkt
AVCodec.
Definition: avcodec.h:3600
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
AVColorTransferCharacteristic
Color Transfer Characteristic.
Definition: pixfmt.h:410
functionally identical to above
Definition: pixfmt.h:444
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:2597
static const AVOption options[]
static int copy_replace_length_codes(AVCodecContext *avctx, size_t length_code_size, CMSampleBufferRef sample_buffer, ExtraSEI *sei, uint8_t *dst_data, size_t dst_size)
Copies NAL units and replaces length codes with H.264 Annex B start codes.
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1813
static int is_post_sei_nal_type(int nal_type)
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:661
static int16_t block[64]
Definition: dct.c:113
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:984
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
static int vtenc_cm_to_avpacket(AVCodecContext *avctx, CMSampleBufferRef sample_buffer, AVPacket *pkt, ExtraSEI *sei)
#define FF_CODEC_CAP_INIT_THREADSAFE
The codec does not modify any global variables in the init function, allowing to call the init functi...
Definition: internal.h:40
HMTX pthread_mutex_t
Definition: os2threads.h:49
uint8_t
#define av_cold
Definition: attributes.h:82
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
AVOptions.
static const uint32_t color[16+AV_CLASS_CATEGORY_NB]
Definition: log.c:94
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:2579
also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
Definition: pixfmt.h:415
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:383
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1791
static AVFrame * frame
static int get_cv_pixel_info(AVCodecContext *avctx, const AVFrame *frame, int *color, int *plane_count, size_t *widths, size_t *heights, size_t *strides, size_t *contiguous_buf_size)
Structure to hold side data for an AVFrame.
Definition: frame.h:143
uint8_t * data
Definition: avcodec.h:1601
int64_t frame_ct_in
Not part of ABI.
Definition: pixfmt.h:461
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:146
static int get_length_code_size(AVCodecContext *avctx, CMSampleBufferRef sample_buffer, size_t *size)
AVColorRange
MPEG vs JPEG YUV range.
Definition: pixfmt.h:457
ptrdiff_t size
Definition: opengl_enc.c:101
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVColorPrimaries
Chromaticity coordinates of the source primaries.
Definition: pixfmt.h:389
static av_cold int vtenc_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)
#define av_log(a,...)
#define kCVImageBufferYCbCrMatrix_ITU_R_2020
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1633
void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val)
H.264 common definitions.
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: avcodec.h:191
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1998
static void vtenc_get_frame_info(CMSampleBufferRef buffer, bool *is_key_frame)
av_default_item_name
#define AVERROR(e)
Definition: error.h:43
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val)
static int find_sei_end(AVCodecContext *avctx, uint8_t *nal_data, size_t nal_size, uint8_t **sei_end)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
BufNode * q_tail
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:57
static void add_color_attr(AVCodecContext *avctx, CFMutableDictionaryRef dict)
static int get_params_size(AVCodecContext *avctx, CMVideoFormatDescriptionRef vid_fmt, size_t *size)
Get the parameter sets from a CMSampleBufferRef.
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1771
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:90
also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
Definition: pixfmt.h:391
ExtraSEI * sei
simple assert() macros that are a bit more flexible than ISO C assert().
const char * name
Name of the codec implementation.
Definition: avcodec.h:3607
registered user data as specified by Rec. ITU-T T.35
Definition: h264_sei.h:30
VTH264Entropy
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
GLsizei count
Definition: opengl_enc.c:109
static int get_frame(AVFilterContext *ctx, int is_second)
Definition: vf_nnedi.c:689
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1607
const char * av_color_primaries_name(enum AVColorPrimaries primaries)
Definition: pixdesc.c:2585
int64_t frames_before
static int vtenc_create_encoder(AVCodecContext *avctx, CMVideoCodecType codec_type, CFStringRef profile_level, CFNumberRef gamma_level, CFDictionaryRef enc_info, CFDictionaryRef pixel_buffer_info, VTCompressionSessionRef *session)
VTCompressionSessionRef session
CFStringRef color_primaries
static int get_cv_transfer_function(AVCodecContext *avctx, CFStringRef *transfer_fnc, CFNumberRef *gamma_level)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
#define AVERROR_BUFFER_TOO_SMALL
Buffer too small.
Definition: error.h:51
CMSampleBufferRef cm_buffer
int width
picture width / height.
Definition: avcodec.h:1863
ITU-R BT2020 non-constant luminance system.
Definition: pixfmt.h:446
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2392
#define OFFSET(x)
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
void * data
#define src
Definition: vp9dsp.c:530
static enum AVPixelFormat pix_fmts[]
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:460
#define kCVImageBufferTransferFunction_ITU_R_2020
static int vtenc_q_pop(VTEncContext *vtctx, bool wait, CMSampleBufferRef *buf, ExtraSEI **sei)
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
also ITU-R BT1361
Definition: pixfmt.h:412
static int vtenc_populate_extradata(AVCodecContext *avctx, CMVideoCodecType codec_type, CFStringRef profile_level, CFNumberRef gamma_level, CFDictionaryRef enc_info, CFDictionaryRef pixel_buffer_info)
static const AVClass h264_videotoolbox_class
Libavcodec external API header.
static int get_cv_pixel_format(AVCodecContext *avctx, enum AVPixelFormat fmt, enum AVColorRange range, int *av_pixel_format, int *range_guessed)
enum AVCodecID codec_id
Definition: avcodec.h:1693
AVBufferRef * av_buffer_alloc(int size)
Allocate an AVBuffer of the given size using av_malloc().
Definition: buffer.c:66
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
int64_t first_pts
main external API structure.
Definition: avcodec.h:1676
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:567
uint8_t * data
The data buffer.
Definition: buffer.h:89
static int create_encoder_dict_h264(const AVFrame *frame, CFDictionaryRef *dict_out)
void * buf
Definition: avisynth_c.h:690
static av_cold int vtenc_close(AVCodecContext *avctx)
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1792
Describe the class of an AVClass context structure.
Definition: log.h:67
CFStringRef transfer_function
static void clear_frame_queue(VTEncContext *vtctx)
static int get_cv_ycbcr_matrix(AVCodecContext *avctx, CFStringRef *matrix)
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2406
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2399
AVCodec ff_h264_videotoolbox_encoder
static int copy_emulation_prev(const uint8_t *src, size_t src_size, uint8_t *dst, ssize_t dst_offset, size_t dst_size)
Copies the data inserting emulation prevention bytes as needed.
static int vtenc_send_frame(AVCodecContext *avctx, VTEncContext *vtctx, const AVFrame *frame)
VT_H264Profile
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: utils.c:1722
int64_t frames_after
mfxU16 profile
Definition: qsvenc.c:42
static int get_sei_msg_bytes(const ExtraSEI *sei, int type)
Returns a sufficient number of bytes to contain the sei data.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:493
static int64_t pts
Global timestamp for the audio frames.
static int flags
Definition: cpu.c:47
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:207
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:882
the normal 219*2^(n-8) "MPEG" YUV ranges
Definition: pixfmt.h:459
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:1889
static int create_cv_pixel_buffer(AVCodecContext *avctx, const AVFrame *frame, CVPixelBufferRef *cv_img)
TARGET_OS_IPHONE.
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
common internal api header.
size_t size
const char * av_color_transfer_name(enum AVColorTransferCharacteristic transfer)
Definition: pixdesc.c:2591
CFStringRef ycbcr_matrix
static void vtenc_output_callback(void *ctx, void *sourceFrameCtx, OSStatus status, VTEncodeInfoFlags flags, CMSampleBufferRef sample_buffer)
also ITU-R BT470BG
Definition: pixfmt.h:416
static void vt_release_num(CFNumberRef *refPtr)
NULL-safe release of *refPtr, and sets value to NULL.
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
pthread_mutex_t lock
int den
Denominator.
Definition: rational.h:60
#define kCVImageBufferColorPrimaries_ITU_R_2020
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:734
void * priv_data
Definition: avcodec.h:1718
#define av_free(p)
int64_t realtime
int len
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:120
enum AVColorPrimaries color_primaries
Definition: frame.h:424
#define VE
int64_t frame_ct_out
ITU-R BT2020 for 10-bit system.
Definition: pixfmt.h:425
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1600
int ff_alloc_a53_sei(const AVFrame *frame, size_t prefix_len, void **data, size_t *sei_size)
Check AVFrame for A53 side data and allocate and fill SEI message with A53 info.
Definition: utils.c:4280
ITU-R BT2020.
Definition: pixfmt.h:400
int height
Definition: frame.h:236
static int count_nalus(size_t length_code_size, CMSampleBufferRef sample_buffer, int *count)
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:426
static const uint8_t start_code[]
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:113
static av_cold int vtenc_init(AVCodecContext *avctx)
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2182
static int get_cv_color_primaries(AVCodecContext *avctx, CFStringRef *primaries)
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1578
static void set_async_error(VTEncContext *vtctx, int err)
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1594
GLuint buffer
Definition: opengl_enc.c:102
int64_t dts_delta