FFmpeg
avfoundation.m
Go to the documentation of this file.
1 /*
2  * AVFoundation input device
3  * Copyright (c) 2014 Thilo Borgmann <thilo.borgmann@mail.de>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * AVFoundation input device
25  * @author Thilo Borgmann <thilo.borgmann@mail.de>
26  */
27 
28 #import <AVFoundation/AVFoundation.h>
29 #include <pthread.h>
30 
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/opt.h"
34 #include "libavutil/avstring.h"
35 #include "libavformat/internal.h"
36 #include "libavutil/internal.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/imgutils.h"
40 #include "avdevice.h"
41 
/* Internal time base for all timestamps produced by this device: microseconds. */
static const int avf_time_base = 1000000;

/* avf_time_base as an AVRational, for use with the av_rescale_q() family. */
static const AVRational avf_time_base_q = {
    .num = 1,
    .den = avf_time_base
};
48 
51  OSType avf_id;
52 };
53 
/* Mapping table between FFmpeg pixel formats and the CoreVideo pixel format
 * identifiers AVFoundation understands. Scanned linearly; terminated by the
 * AV_PIX_FMT_NONE sentinel entry. */
static const struct AVFPixelFormatSpec avf_pixel_formats[] = {
    { AV_PIX_FMT_MONOBLACK, kCVPixelFormatType_1Monochrome },
    { AV_PIX_FMT_RGB555BE, kCVPixelFormatType_16BE555 },
    { AV_PIX_FMT_RGB555LE, kCVPixelFormatType_16LE555 },
    { AV_PIX_FMT_RGB565BE, kCVPixelFormatType_16BE565 },
    { AV_PIX_FMT_RGB565LE, kCVPixelFormatType_16LE565 },
    { AV_PIX_FMT_RGB24, kCVPixelFormatType_24RGB },
    { AV_PIX_FMT_BGR24, kCVPixelFormatType_24BGR },
    { AV_PIX_FMT_0RGB, kCVPixelFormatType_32ARGB },
    { AV_PIX_FMT_BGR0, kCVPixelFormatType_32BGRA },
    { AV_PIX_FMT_0BGR, kCVPixelFormatType_32ABGR },
    { AV_PIX_FMT_RGB0, kCVPixelFormatType_32RGBA },
    { AV_PIX_FMT_BGR48BE, kCVPixelFormatType_48RGB },
    { AV_PIX_FMT_UYVY422, kCVPixelFormatType_422YpCbCr8 },
    { AV_PIX_FMT_YUVA444P, kCVPixelFormatType_4444YpCbCrA8R },
    { AV_PIX_FMT_YUVA444P16LE, kCVPixelFormatType_4444AYpCbCr16 },
    { AV_PIX_FMT_YUV444P, kCVPixelFormatType_444YpCbCr8 },
    { AV_PIX_FMT_YUV422P16, kCVPixelFormatType_422YpCbCr16 },
    { AV_PIX_FMT_YUV422P10, kCVPixelFormatType_422YpCbCr10 },
    { AV_PIX_FMT_YUV444P10, kCVPixelFormatType_444YpCbCr10 },
    { AV_PIX_FMT_YUV420P, kCVPixelFormatType_420YpCbCr8Planar },
    { AV_PIX_FMT_NV12, kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange },
    { AV_PIX_FMT_YUYV422, kCVPixelFormatType_422YpCbCr8_yuvs },
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
    /* kCVPixelFormatType_OneComponent8 only exists on OS X >= 10.8 */
    { AV_PIX_FMT_GRAY8, kCVPixelFormatType_OneComponent8 },
#endif
    { AV_PIX_FMT_NONE, 0 }
};
82 
/**
 * Demuxer private context, shared between the C demuxer callbacks and the
 * Objective-C capture delegates.
 *
 * NOTE(review): several field declarations are stripped from this rendering
 * (the gaps below) — e.g. the frame lock, frame counters, filename pointers
 * and option fields that are referenced elsewhere in this file; confirm the
 * full layout against the upstream source.
 */
typedef struct
{
    AVClass* class;         // must stay the first member: av_log() is called on ctx directly

    int width, height;      // requested video size; 0x0 means "accept any"

    int audio_be;           // non-zero if samples are big-endian (from mFormatFlags)

    enum AVPixelFormat pixel_format;   // requested, later the negotiated, FFmpeg pixel format

    AVCaptureSession *capture_session;       // owning capture session
    AVCaptureVideoDataOutput *video_output;
    AVCaptureAudioDataOutput *audio_output;
    CMSampleBufferRef current_frame;         // most recent video sample, CFRetain'ed by the delegate
    CMSampleBufferRef current_audio_frame;   // most recent audio sample, CFRetain'ed by the delegate

    AVCaptureDevice *observed_device;        // device watched via KVO for transport-control changes
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    AVCaptureDeviceTransportControlsPlaybackMode observed_mode;
#endif
} AVFContext;
139 
/* NOTE(review): the signature line is stripped from this rendering; given the
 * lock_frames(ctx) call sites later in the file this is presumably
 * static void lock_frames(AVFContext* ctx) — confirm against upstream. */
{
    pthread_mutex_lock(&ctx->frame_lock);   // serializes access to the current_*_frame buffers
}
144 
/* NOTE(review): the signature line is stripped from this rendering; this is
 * presumably the matching static void unlock_frames(AVFContext* ctx) for the
 * lock helper above — confirm against upstream. */
{
    pthread_mutex_unlock(&ctx->frame_lock);
}
149 
/** FrameReceiver class - delegate for AVCaptureSession
 */
/**
 * Video delegate: receives video sample buffers from the capture session and
 * stores the newest one in the shared AVFContext.
 */
@interface AVFFrameReceiver : NSObject
{
    /* NOTE(review): an ivar declaration is stripped from this rendering —
     * the implementation below uses a _context pointer to the AVFContext. */
}

- (id)initWithContext:(AVFContext*)context;

/* AVCaptureVideoDataOutput sample buffer callback (delivered on the
 * dispatch queue set in add_video_device). */
- (void) captureOutput:(AVCaptureOutput *)captureOutput
 didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
        fromConnection:(AVCaptureConnection *)connection;

@end
164 
165 @implementation AVFFrameReceiver
166 
/* Designated initializer: remember the demuxer context and, when a
 * transport-control device was set up, start KVO on its playback mode. */
- (id)initWithContext:(AVFContext*)context
{
    self = [super init];
    if (self) {
        _context = context;

        // start observing if a device is set for it
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
        if (_context->observed_device) {
            NSKeyValueObservingOptions opts = NSKeyValueObservingOptionNew;
            NSString *key = NSStringFromSelector(@selector(transportControlsPlaybackMode));

            [_context->observed_device addObserver:self
                                        forKeyPath:key
                                           options:opts
                                           context:_context];
        }
#endif
    }
    return self;
}
187 
/* Tear down the KVO registration made in -initWithContext: before the
 * receiver disappears (file is compiled without ARC, hence [super dealloc]). */
- (void)dealloc {
    // stop observing if a device is set for it
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    if (_context->observed_device) {
        [_context->observed_device removeObserver:self
                                       forKeyPath:NSStringFromSelector(@selector(transportControlsPlaybackMode))];
    }
#endif
    [super dealloc];
}
198 
/* KVO callback for transportControlsPlaybackMode. Notifications that do not
 * carry our context pointer are forwarded to super; ours flag the demuxer to
 * quit once the observed device stops playing. */
- (void)observeValueForKeyPath:(NSString *)keyPath
                      ofObject:(id)object
                        change:(NSDictionary *)change
                       context:(void *)context {
    if (context != _context) {
        // not our registration — let NSObject handle (or raise on) it
        [super observeValueForKeyPath: keyPath
                             ofObject: object
                               change: change
                              context: context];
        return;
    }
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    AVCaptureDeviceTransportControlsPlaybackMode new_mode =
        [change[NSKeyValueChangeNewKey] integerValue];

    if (new_mode != _context->observed_mode) {
        if (new_mode == AVCaptureDeviceTransportControlsNotPlayingMode)
            _context->observed_quit = 1;   // device stopped: tell read loop to bail out
        _context->observed_mode = new_mode;
    }
#endif
}
222 
/* Store the newest video sample buffer, releasing the previous one; only the
 * most recent frame is ever kept. */
- (void) captureOutput:(AVCaptureOutput *)captureOutput
 didOutputSampleBuffer:(CMSampleBufferRef)videoFrame
        fromConnection:(AVCaptureConnection *)connection
{
    /* NOTE(review): lines are stripped from this rendering at the top and
     * bottom of this method — presumably the frame-lock acquire/release and a
     * captured-frame counter increment; confirm against upstream. */

    if (_context->current_frame != nil) {
        CFRelease(_context->current_frame);
    }

    // CFRetain so the buffer outlives AVFoundation's ownership of it
    _context->current_frame = (CMSampleBufferRef)CFRetain(videoFrame);


}
239 
240 @end
241 
/** AudioReceiver class - delegate for AVCaptureSession
 */
/**
 * Audio delegate: receives audio sample buffers from the capture session and
 * stores the newest one in the shared AVFContext.
 */
@interface AVFAudioReceiver : NSObject
{
    /* NOTE(review): an ivar declaration is stripped from this rendering —
     * the implementation below uses a _context pointer to the AVFContext. */
}

- (id)initWithContext:(AVFContext*)context;

/* AVCaptureAudioDataOutput sample buffer callback (delivered on the
 * dispatch queue set in add_audio_device). */
- (void) captureOutput:(AVCaptureOutput *)captureOutput
 didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
        fromConnection:(AVCaptureConnection *)connection;

@end
256 
257 @implementation AVFAudioReceiver
258 
/* Designated initializer: keep a non-owning pointer to the demuxer context. */
- (id)initWithContext:(AVFContext*)context
{
    self = [super init];
    if (self) {
        _context = context;
    }
    return self;
}
266 
/* Store the newest audio sample buffer, releasing the previous one; only the
 * most recent buffer is ever kept. */
- (void) captureOutput:(AVCaptureOutput *)captureOutput
 didOutputSampleBuffer:(CMSampleBufferRef)audioFrame
        fromConnection:(AVCaptureConnection *)connection
{
    /* NOTE(review): lines are stripped from this rendering at the top and
     * bottom of this method — presumably the frame-lock acquire/release and a
     * captured-frame counter increment; confirm against upstream. */

    if (_context->current_audio_frame != nil) {
        CFRelease(_context->current_audio_frame);
    }

    // CFRetain so the buffer outlives AVFoundation's ownership of it
    _context->current_audio_frame = (CMSampleBufferRef)CFRetain(audioFrame);


}
283 
284 @end
285 
/* NOTE(review): the signature line is stripped from this rendering; this is
 * presumably the teardown helper taking the AVFContext* ctx used below. */
{
    // stop delivering samples before releasing anything the delegates touch
    [ctx->capture_session stopRunning];

    [ctx->capture_session release];
    [ctx->video_output release];
    [ctx->audio_output release];
    [ctx->avf_delegate release];
    [ctx->avf_audio_delegate release];

    ctx->capture_session = NULL;
    ctx->video_output = NULL;
    ctx->audio_output = NULL;
    ctx->avf_delegate = NULL;
    ctx->avf_audio_delegate = NULL;

    av_freep(&ctx->audio_buffer);

    pthread_mutex_destroy(&ctx->frame_lock);

    if (ctx->current_frame) {
        CFRelease(ctx->current_frame);
    }
    /* NOTE(review): unlike current_frame, current_audio_frame is not released
     * here — confirm whether the last retained audio buffer leaks. */
}
310 
/* Split the input URL of the form "[video]:[audio]" into the video and audio
 * device name fields of the context.
 * NOTE(review): the signature line is stripped from this rendering (takes
 * AVFormatContext *s). av_strdup() is not checked for NULL before tmp[0] is
 * read, and tmp is never freed here — the *_filename fields keep pointing
 * into it. Confirm ownership/lifetime against the caller. */
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    char *tmp = av_strdup(s->url);
    char *save;

    if (tmp[0] != ':') {
        // "video[:audio]" — first token is the video device
        ctx->video_filename = av_strtok(tmp, ":", &save);
        ctx->audio_filename = av_strtok(NULL, ":", &save);
    } else {
        // ":audio" — audio-only capture
        ctx->audio_filename = av_strtok(tmp, ":", &save);
    }
}
324 
325 /**
326  * Configure the video device.
327  *
328  * Configure the video device using a run-time approach to access properties
329  * since formats, activeFormat are available since iOS >= 7.0 or OSX >= 10.7
 * and activeVideoMaxFrameDuration is available since iOS >= 7.0 and OSX >= 10.9.
331  *
332  * The NSUndefinedKeyException must be handled by the caller of this function.
333  *
334  */
static int configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;

    double framerate = av_q2d(ctx->framerate);
    NSObject *range = nil;
    NSObject *format = nil;
    NSObject *selected_range = nil;
    NSObject *selected_format = nil;

    // try to configure format by formats list
    // might raise an exception if no format list is given
    // (then fallback to default, no configuration)
    @try {
        // pick the first device format matching the requested size
        // (or any size when 0x0 was requested)
        for (format in [video_device valueForKey:@"formats"]) {
            CMFormatDescriptionRef formatDescription;
            CMVideoDimensions dimensions;

            formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
            dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);

            if ((ctx->width == 0 && ctx->height == 0) ||
                (dimensions.width == ctx->width && dimensions.height == ctx->height)) {

                selected_format = format;

                // within the chosen format, find a frame rate range whose
                // maximum matches the requested rate (0.01 fps tolerance)
                for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
                    double max_framerate;

                    [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
                    if (fabs (framerate - max_framerate) < 0.01) {
                        selected_range = range;
                        break;
                    }
                }
            }
        }

        if (!selected_format) {
            av_log(s, AV_LOG_ERROR, "Selected video size (%dx%d) is not supported by the device.\n",
                ctx->width, ctx->height);
            goto unsupported_format;
        }

        if (!selected_range) {
            av_log(s, AV_LOG_ERROR, "Selected framerate (%f) is not supported by the device.\n",
                framerate);
            // muxed (e.g. DV) devices get a pass: keep the device default rate
            if (ctx->video_is_muxed) {
                av_log(s, AV_LOG_ERROR, "Falling back to default.\n");
            } else {
                goto unsupported_format;
            }
        }

        // apply the selection; the caller unlocks the device again after the
        // session has been started (see header function)
        if ([video_device lockForConfiguration:NULL] == YES) {
            if (selected_format) {
                [video_device setValue:selected_format forKey:@"activeFormat"];
            }
            if (selected_range) {
                // pin min == max frame duration to get a fixed frame rate
                NSValue *min_frame_duration = [selected_range valueForKey:@"minFrameDuration"];
                [video_device setValue:min_frame_duration forKey:@"activeVideoMinFrameDuration"];
                [video_device setValue:min_frame_duration forKey:@"activeVideoMaxFrameDuration"];
            }
        } else {
            av_log(s, AV_LOG_ERROR, "Could not lock device for configuration.\n");
            return AVERROR(EINVAL);
        }
    } @catch(NSException *e) {
        // device has no KVC "formats" support — keep its default configuration
        av_log(ctx, AV_LOG_WARNING, "Configuration of video device failed, falling back to default.\n");
    }

    return 0;

unsupported_format:

    // dump every size/frame-rate combination the device offers to help the user
    av_log(s, AV_LOG_ERROR, "Supported modes:\n");
    for (format in [video_device valueForKey:@"formats"]) {
        CMFormatDescriptionRef formatDescription;
        CMVideoDimensions dimensions;

        formatDescription = (CMFormatDescriptionRef) [format performSelector:@selector(formatDescription)];
        dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription);

        for (range in [format valueForKey:@"videoSupportedFrameRateRanges"]) {
            double min_framerate;
            double max_framerate;

            [[range valueForKey:@"minFrameRate"] getValue:&min_framerate];
            [[range valueForKey:@"maxFrameRate"] getValue:&max_framerate];
            av_log(s, AV_LOG_ERROR, " %dx%d@[%f %f]fps\n",
                dimensions.width, dimensions.height,
                min_framerate, max_framerate);
        }
    }
    return AVERROR(EINVAL);
}
431 
/**
 * Attach the selected video source (capture device, muxed device, or screen
 * input) to the capture session and configure a matching
 * AVCaptureVideoDataOutput delivering frames to the AVFFrameReceiver.
 *
 * @param s            demuxer context (AVFContext in priv_data)
 * @param video_device device to attach; for screen capture this is actually
 *                     an AVCaptureScreenInput cast to AVCaptureDevice*
 * @return 0 on success, 1 on generic failure, negative AVERROR on
 *         configuration errors
 */
static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    int ret;
    NSError *error = nil;
    AVCaptureInput* capture_input = nil;
    struct AVFPixelFormatSpec pxl_fmt_spec;
    NSNumber *pixel_format;
    NSDictionary *capture_dict;
    dispatch_queue_t queue;

    // indices >= num_video_devices denote screen inputs, which already are
    // AVCaptureInputs and need no AVCaptureDeviceInput wrapper
    if (ctx->video_device_index < ctx->num_video_devices) {
        capture_input = (AVCaptureInput*) [[[AVCaptureDeviceInput alloc] initWithDevice:video_device error:&error] autorelease];
    } else {
        capture_input = (AVCaptureInput*) video_device;
    }

    if (!capture_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        return 1;
    }

    if ([ctx->capture_session canAddInput:capture_input]) {
        [ctx->capture_session addInput:capture_input];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video input to capture session\n");
        return 1;
    }

    // Attaching output
    ctx->video_output = [[AVCaptureVideoDataOutput alloc] init];

    if (!ctx->video_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV video output\n");
        return 1;
    }

    // Configure device framerate and video size
    @try {
        if ((ret = configure_video_device(s, video_device)) < 0) {
            return ret;
        }
    } @catch (NSException *exception) {
        // NSUndefinedKeyException just means the runtime-probed properties are
        // unavailable on this OS version; anything else is a real error
        if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
            av_log (s, AV_LOG_ERROR, "An error occurred: %s", [exception.reason UTF8String]);
            return AVERROR_EXTERNAL;
        }
    }

    // select pixel format
    pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

    for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
        if (ctx->pixel_format == avf_pixel_formats[i].ff_id) {
            pxl_fmt_spec = avf_pixel_formats[i];
            break;
        }
    }

    // check if selected pixel format is supported by AVFoundation
    if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
        // Fix: report the format the user actually requested. Previously this
        // passed pxl_fmt_spec.ff_id, which is AV_PIX_FMT_NONE here, making
        // av_get_pix_fmt_name() return NULL and feeding NULL to "%s".
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by AVFoundation.\n",
               av_get_pix_fmt_name(ctx->pixel_format));
        return 1;
    }

    // check if the pixel format is available for this device
    if ([[ctx->video_output availableVideoCVPixelFormatTypes] indexOfObject:[NSNumber numberWithInt:pxl_fmt_spec.avf_id]] == NSNotFound) {
        av_log(s, AV_LOG_ERROR, "Selected pixel format (%s) is not supported by the input device.\n",
               av_get_pix_fmt_name(pxl_fmt_spec.ff_id));

        pxl_fmt_spec.ff_id = AV_PIX_FMT_NONE;

        // list what the device does support, and remember the first format we
        // also know how to handle so we can fall back to it
        av_log(s, AV_LOG_ERROR, "Supported pixel formats:\n");
        for (NSNumber *pxl_fmt in [ctx->video_output availableVideoCVPixelFormatTypes]) {
            struct AVFPixelFormatSpec pxl_fmt_dummy;
            pxl_fmt_dummy.ff_id = AV_PIX_FMT_NONE;
            for (int i = 0; avf_pixel_formats[i].ff_id != AV_PIX_FMT_NONE; i++) {
                if ([pxl_fmt intValue] == avf_pixel_formats[i].avf_id) {
                    pxl_fmt_dummy = avf_pixel_formats[i];
                    break;
                }
            }

            if (pxl_fmt_dummy.ff_id != AV_PIX_FMT_NONE) {
                av_log(s, AV_LOG_ERROR, " %s\n", av_get_pix_fmt_name(pxl_fmt_dummy.ff_id));

                // select first supported pixel format instead of user selected (or default) pixel format
                if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
                    pxl_fmt_spec = pxl_fmt_dummy;
                }
            }
        }

        // fail if there is no appropriate pixel format or print a warning about overriding the pixel format
        if (pxl_fmt_spec.ff_id == AV_PIX_FMT_NONE) {
            return 1;
        } else {
            av_log(s, AV_LOG_WARNING, "Overriding selected pixel format to use %s instead.\n",
                   av_get_pix_fmt_name(pxl_fmt_spec.ff_id));
        }
    }

    // set videoSettings to an empty dict for receiving raw data of muxed devices
    if (ctx->capture_raw_data) {
        ctx->pixel_format = pxl_fmt_spec.ff_id;
        ctx->video_output.videoSettings = @{ };
    } else {
        ctx->pixel_format = pxl_fmt_spec.ff_id;
        pixel_format = [NSNumber numberWithUnsignedInt:pxl_fmt_spec.avf_id];
        capture_dict = [NSDictionary dictionaryWithObject:pixel_format
                                                   forKey:(id)kCVPixelBufferPixelFormatTypeKey];

        [ctx->video_output setVideoSettings:capture_dict];
    }
    [ctx->video_output setAlwaysDiscardsLateVideoFrames:ctx->drop_late_frames];

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    // check for transport control support and set observer device if supported
    if (!ctx->video_is_screen) {
        int trans_ctrl = [video_device transportControlsSupported];
        AVCaptureDeviceTransportControlsPlaybackMode trans_mode = [video_device transportControlsPlaybackMode];

        if (trans_ctrl) {
            ctx->observed_mode = trans_mode;
            ctx->observed_device = video_device;
        }
    }
#endif

    ctx->avf_delegate = [[AVFFrameReceiver alloc] initWithContext:ctx];

    // serial queue: frames are delivered to the delegate one at a time
    queue = dispatch_queue_create("avf_queue", NULL);
    [ctx->video_output setSampleBufferDelegate:ctx->avf_delegate queue:queue];
    dispatch_release(queue);

    if ([ctx->capture_session canAddOutput:ctx->video_output]) {
        [ctx->capture_session addOutput:ctx->video_output];
    } else {
        av_log(s, AV_LOG_ERROR, "can't add video output to capture session\n");
        return 1;
    }

    return 0;
}
578 
/**
 * Hook the given audio capture device into the capture session and attach an
 * AVCaptureAudioDataOutput that feeds samples to the AVFAudioReceiver.
 *
 * @param s            demuxer context (AVFContext in priv_data)
 * @param audio_device audio capture device to attach
 * @return 0 on success, 1 on any failure
 */
static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    NSError *error = nil;
    dispatch_queue_t audio_queue;
    AVCaptureDeviceInput* audio_dev_input =
        [[[AVCaptureDeviceInput alloc] initWithDevice:audio_device error:&error] autorelease];

    if (!audio_dev_input) {
        av_log(s, AV_LOG_ERROR, "Failed to create AV capture input device: %s\n",
               [[error localizedDescription] UTF8String]);
        return 1;
    }

    // input side
    if (![ctx->capture_session canAddInput:audio_dev_input]) {
        av_log(s, AV_LOG_ERROR, "can't add audio input to capture session\n");
        return 1;
    }
    [ctx->capture_session addInput:audio_dev_input];

    // output side
    ctx->audio_output = [[AVCaptureAudioDataOutput alloc] init];
    if (!ctx->audio_output) {
        av_log(s, AV_LOG_ERROR, "Failed to init AV audio output\n");
        return 1;
    }

    ctx->avf_audio_delegate = [[AVFAudioReceiver alloc] initWithContext:ctx];

    // serial queue: buffers are delivered to the delegate one at a time
    audio_queue = dispatch_queue_create("avf_audio_queue", NULL);
    [ctx->audio_output setSampleBufferDelegate:ctx->avf_audio_delegate queue:audio_queue];
    dispatch_release(audio_queue);

    if (![ctx->capture_session canAddOutput:ctx->audio_output]) {
        av_log(s, AV_LOG_ERROR, "adding audio output to capture session failed\n");
        return 1;
    }
    [ctx->capture_session addOutput:ctx->audio_output];

    return 0;
}
622 
/* NOTE(review): the opening signature line is stripped from this rendering;
 * from the call site in the header function this is presumably
 * static int get_video_config(AVFormatContext *s) — confirm upstream. */
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    CVImageBufferRef image_buffer;
    CMBlockBufferRef block_buffer;
    CGSize image_buffer_size;
    AVStream* stream = avformat_new_stream(s, NULL);

    if (!stream) {
        return 1;
    }

    // Take stream info from the first frame.
    while (ctx->frames_captured < 1) {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
    }

    lock_frames(ctx);

    ctx->video_stream_index = stream->index;

    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
    block_buffer = CMSampleBufferGetDataBuffer(ctx->current_frame);

    if (image_buffer) {
        // raw frame: geometry comes from the CVImageBuffer
        image_buffer_size = CVImageBufferGetEncodedSize(image_buffer);

        stream->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->width = (int)image_buffer_size.width;
        stream->codecpar->height = (int)image_buffer_size.height;
        stream->codecpar->format = ctx->pixel_format;
    } else {
        // no image buffer: data arrives via the block buffer (DV stream)
        stream->codecpar->codec_id = AV_CODEC_ID_DVVIDEO;
        stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        stream->codecpar->format = ctx->pixel_format;
    }

    CFRelease(ctx->current_frame);
    ctx->current_frame = nil;

    /* NOTE(review): a line is stripped here in this rendering — presumably
     * the unlock_frames(ctx) matching the lock above; confirm. */

    return 0;
}
670 
/* NOTE(review): the opening signature line is stripped from this rendering;
 * from the call site in the header function this is presumably
 * static int get_audio_config(AVFormatContext *s) — confirm upstream. */
{
    AVFContext *ctx = (AVFContext*)s->priv_data;
    CMFormatDescriptionRef format_desc;
    AVStream* stream = avformat_new_stream(s, NULL);

    if (!stream) {
        return 1;
    }

    // Take stream info from the first frame.
    while (ctx->audio_frames_captured < 1) {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
    }

    lock_frames(ctx);

    /* NOTE(review): the early "return 1" paths below exit while the frame
     * lock taken above is still held — confirm against upstream whether
     * these need an unlock. */

    ctx->audio_stream_index = stream->index;

    avpriv_set_pts_info(stream, 64, 1, avf_time_base);

    format_desc = CMSampleBufferGetFormatDescription(ctx->current_audio_frame);
    const AudioStreamBasicDescription *basic_desc = CMAudioFormatDescriptionGetStreamBasicDescription(format_desc);

    if (!basic_desc) {
        av_log(s, AV_LOG_ERROR, "audio format not available\n");
        return 1;
    }

    stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    stream->codecpar->sample_rate = basic_desc->mSampleRate;
    stream->codecpar->channels = basic_desc->mChannelsPerFrame;
    stream->codecpar->channel_layout = av_get_default_channel_layout(stream->codecpar->channels);

    // cache raw-format flags for per-packet conversion later
    ctx->audio_channels = basic_desc->mChannelsPerFrame;
    ctx->audio_bits_per_sample = basic_desc->mBitsPerChannel;
    ctx->audio_float = basic_desc->mFormatFlags & kAudioFormatFlagIsFloat;
    ctx->audio_be = basic_desc->mFormatFlags & kAudioFormatFlagIsBigEndian;
    ctx->audio_signed_integer = basic_desc->mFormatFlags & kAudioFormatFlagIsSignedInteger;
    ctx->audio_packed = basic_desc->mFormatFlags & kAudioFormatFlagIsPacked;
    ctx->audio_non_interleaved = basic_desc->mFormatFlags & kAudioFormatFlagIsNonInterleaved;

    // map supported LPCM flag combinations to the matching FFmpeg PCM codec id
    if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
        ctx->audio_float &&
        ctx->audio_bits_per_sample == 32 &&
        ctx->audio_packed) {
        stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
    } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
        ctx->audio_signed_integer &&
        ctx->audio_bits_per_sample == 16 &&
        ctx->audio_packed) {
        stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
    } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
        ctx->audio_signed_integer &&
        ctx->audio_bits_per_sample == 24 &&
        ctx->audio_packed) {
        stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
    } else if (basic_desc->mFormatID == kAudioFormatLinearPCM &&
        ctx->audio_signed_integer &&
        ctx->audio_bits_per_sample == 32 &&
        ctx->audio_packed) {
        stream->codecpar->codec_id = ctx->audio_be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
    } else {
        av_log(s, AV_LOG_ERROR, "audio format is not supported\n");
        return 1;
    }

    if (ctx->audio_non_interleaved) {
        // scratch buffer sized from the first frame, used for re-interleaving
        CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
        ctx->audio_buffer_size = CMBlockBufferGetDataLength(block_buffer);
        ctx->audio_buffer = av_malloc(ctx->audio_buffer_size);
        if (!ctx->audio_buffer) {
            av_log(s, AV_LOG_ERROR, "error allocating audio buffer\n");
            return 1;
        }
    }

    CFRelease(ctx->current_audio_frame);
    ctx->current_audio_frame = nil;

    /* NOTE(review): a line is stripped here in this rendering — presumably
     * the unlock_frames(ctx) matching the lock above; confirm. */

    return 0;
}
755 
/* NOTE(review): the signature line is stripped from this rendering; this is
 * the demuxer's read_header callback, taking AVFormatContext *s. It resolves
 * the requested video/audio devices, builds the capture session and creates
 * the AVStreams from the first captured frames. */
{
    NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
    uint32_t num_screens = 0;
    AVFContext *ctx = (AVFContext*)s->priv_data;
    AVCaptureDevice *video_device = nil;
    AVCaptureDevice *audio_device = nil;
    // Find capture device
    NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    NSArray *devices_muxed = [AVCaptureDevice devicesWithMediaType:AVMediaTypeMuxed];

    // device indices are: [video devices][muxed devices][screens]
    ctx->num_video_devices = [devices count] + [devices_muxed count];

    pthread_mutex_init(&ctx->frame_lock, NULL);

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    CGGetActiveDisplayList(0, NULL, &num_screens);
#endif

    // List devices if requested
    if (ctx->list_devices) {
        int index = 0;
        av_log(ctx, AV_LOG_INFO, "AVFoundation video devices:\n");
        for (AVCaptureDevice *device in devices) {
            const char *name = [[device localizedName] UTF8String];
            index = [devices indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
        }
        for (AVCaptureDevice *device in devices_muxed) {
            const char *name = [[device localizedName] UTF8String];
            index = [devices count] + [devices_muxed indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
        }
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
        if (num_screens > 0) {
            CGDirectDisplayID screens[num_screens];
            CGGetActiveDisplayList(num_screens, screens, &num_screens);
            for (int i = 0; i < num_screens; i++) {
                av_log(ctx, AV_LOG_INFO, "[%d] Capture screen %d\n", ctx->num_video_devices + i, i);
            }
        }
#endif

        av_log(ctx, AV_LOG_INFO, "AVFoundation audio devices:\n");
        devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];
        for (AVCaptureDevice *device in devices) {
            const char *name = [[device localizedName] UTF8String];
            int index = [devices indexOfObject:device];
            av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
        }
        goto fail;
    }

    // parse input filename for video and audio device
    /* NOTE(review): a line is stripped here in this rendering — presumably the
     * call performing the parsing announced by the comment above. */

    // check for device index given in filename
    if (ctx->video_device_index == -1 && ctx->video_filename) {
        sscanf(ctx->video_filename, "%d", &ctx->video_device_index);
    }
    if (ctx->audio_device_index == -1 && ctx->audio_filename) {
        sscanf(ctx->audio_filename, "%d", &ctx->audio_device_index);
    }

    if (ctx->video_device_index >= 0) {
        if (ctx->video_device_index < ctx->num_video_devices) {
            // plain or muxed capture device, addressed by global index
            if (ctx->video_device_index < [devices count]) {
                video_device = [devices objectAtIndex:ctx->video_device_index];
            } else {
                video_device = [devices_muxed objectAtIndex:(ctx->video_device_index - [devices count])];
                ctx->video_is_muxed = 1;
            }
        } else if (ctx->video_device_index < ctx->num_video_devices + num_screens) {
#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
            // screen capture: wrap the display in an AVCaptureScreenInput
            CGDirectDisplayID screens[num_screens];
            CGGetActiveDisplayList(num_screens, screens, &num_screens);
            AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[ctx->video_device_index - ctx->num_video_devices]] autorelease];

            if (ctx->framerate.num > 0) {
                capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
            }

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
            if (ctx->capture_cursor) {
                capture_screen_input.capturesCursor = YES;
            } else {
                capture_screen_input.capturesCursor = NO;
            }
#endif

            if (ctx->capture_mouse_clicks) {
                capture_screen_input.capturesMouseClicks = YES;
            } else {
                capture_screen_input.capturesMouseClicks = NO;
            }

            video_device = (AVCaptureDevice*) capture_screen_input;
            ctx->video_is_screen = 1;
#endif
        } else {
            av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
            goto fail;
        }
    } else if (ctx->video_filename &&
               strncmp(ctx->video_filename, "none", 4)) {
        // device selected by (prefix of its) localized name
        if (!strncmp(ctx->video_filename, "default", 7)) {
            video_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
        } else {
            // looking for video inputs
            for (AVCaptureDevice *device in devices) {
                if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
                    video_device = device;
                    break;
                }
            }
            // looking for muxed inputs
            for (AVCaptureDevice *device in devices_muxed) {
                if (!strncmp(ctx->video_filename, [[device localizedName] UTF8String], strlen(ctx->video_filename))) {
                    video_device = device;
                    ctx->video_is_muxed = 1;
                    break;
                }
            }

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
            // looking for screen inputs
            if (!video_device) {
                int idx;
                if(sscanf(ctx->video_filename, "Capture screen %d", &idx) && idx < num_screens) {
                    CGDirectDisplayID screens[num_screens];
                    CGGetActiveDisplayList(num_screens, screens, &num_screens);
                    AVCaptureScreenInput* capture_screen_input = [[[AVCaptureScreenInput alloc] initWithDisplayID:screens[idx]] autorelease];
                    video_device = (AVCaptureDevice*) capture_screen_input;
                    ctx->video_device_index = ctx->num_video_devices + idx;
                    ctx->video_is_screen = 1;

                    if (ctx->framerate.num > 0) {
                        capture_screen_input.minFrameDuration = CMTimeMake(ctx->framerate.den, ctx->framerate.num);
                    }

#if !TARGET_OS_IPHONE && __MAC_OS_X_VERSION_MIN_REQUIRED >= 1080
                    if (ctx->capture_cursor) {
                        capture_screen_input.capturesCursor = YES;
                    } else {
                        capture_screen_input.capturesCursor = NO;
                    }
#endif

                    if (ctx->capture_mouse_clicks) {
                        capture_screen_input.capturesMouseClicks = YES;
                    } else {
                        capture_screen_input.capturesMouseClicks = NO;
                    }
                }
            }
#endif
        }

        if (!video_device) {
            av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
            goto fail;
        }
    }

    // get audio device
    if (ctx->audio_device_index >= 0) {
        NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];

        if (ctx->audio_device_index >= [devices count]) {
            av_log(ctx, AV_LOG_ERROR, "Invalid audio device index\n");
            goto fail;
        }

        audio_device = [devices objectAtIndex:ctx->audio_device_index];
    } else if (ctx->audio_filename &&
               strncmp(ctx->audio_filename, "none", 4)) {
        if (!strncmp(ctx->audio_filename, "default", 7)) {
            audio_device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
        } else {
            NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeAudio];

            for (AVCaptureDevice *device in devices) {
                if (!strncmp(ctx->audio_filename, [[device localizedName] UTF8String], strlen(ctx->audio_filename))) {
                    audio_device = device;
                    break;
                }
            }
        }

        if (!audio_device) {
            av_log(ctx, AV_LOG_ERROR, "Audio device not found\n");
            goto fail;
        }
    }

    // Video nor Audio capture device not found, looking for AVMediaTypeVideo/Audio
    if (!video_device && !audio_device) {
        av_log(s, AV_LOG_ERROR, "No AV capture device found\n");
        goto fail;
    }

    if (video_device) {
        if (ctx->video_device_index < ctx->num_video_devices) {
            av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device localizedName] UTF8String]);
        } else {
            av_log(s, AV_LOG_DEBUG, "'%s' opened\n", [[video_device description] UTF8String]);
        }
    }
    if (audio_device) {
        av_log(s, AV_LOG_DEBUG, "audio device '%s' opened\n", [[audio_device localizedName] UTF8String]);
    }

    // Initialize capture session
    ctx->capture_session = [[AVCaptureSession alloc] init];

    if (video_device && add_video_device(s, video_device)) {
        goto fail;
    }
    /* NOTE(review): an add_audio_device() failure is silently ignored here
     * (empty body) — capture proceeds video-only; confirm this is intended. */
    if (audio_device && add_audio_device(s, audio_device)) {
    }

    [ctx->capture_session startRunning];

    /* Unlock device configuration only after the session is started so it
     * does not reset the capture formats */
    if (!ctx->video_is_screen) {
        [video_device unlockForConfiguration];
    }

    if (video_device && get_video_config(s)) {
        goto fail;
    }

    // set audio stream
    if (audio_device && get_audio_config(s)) {
        goto fail;
    }

    [pool release];
    return 0;

fail:
    [pool release];
    /* NOTE(review): a line is stripped here in this rendering — presumably a
     * cleanup call tearing down the partially-built context before return. */
    return AVERROR(EIO);
}
1002 
/**
 * Copy the pixel data of a CVPixelBuffer into an already-allocated packet.
 *
 * Handles both planar and packed (chunky) pixel buffers. The buffer's base
 * address must be locked while its memory is read; the lock is always
 * released before returning, whatever the outcome of the copy.
 *
 * @param s            demuxer context (priv_data is an AVFContext)
 * @param image_buffer locked-on-demand source pixel buffer
 * @param pkt          destination packet; pkt->data/pkt->size must already
 *                     cover the full frame
 * @return 0 on success, AVERROR_EXTERNAL if the buffer cannot be locked,
 *         or a negative AVERROR code from av_image_copy_to_buffer().
 */
static int copy_cvpixelbuffer(AVFormatContext *s,
                              CVPixelBufferRef image_buffer,
                              AVPacket *pkt)
{
    AVFContext *ctx = s->priv_data;
    int src_linesize[4];
    const uint8_t *src_data[4];
    int width  = CVPixelBufferGetWidth(image_buffer);
    int height = CVPixelBufferGetHeight(image_buffer);
    int status;

    memset(src_linesize, 0, sizeof(src_linesize));
    memset(src_data, 0, sizeof(src_data));

    /* The base address is only valid while the buffer is locked. */
    status = CVPixelBufferLockBaseAddress(image_buffer, 0);
    if (status != kCVReturnSuccess) {
        av_log(s, AV_LOG_ERROR, "Could not lock base address: %d (%dx%d)\n", status, width, height);
        return AVERROR_EXTERNAL;
    }

    if (CVPixelBufferIsPlanar(image_buffer)) {
        size_t plane_count = CVPixelBufferGetPlaneCount(image_buffer);
        int i;
        for (i = 0; i < plane_count; i++) {
            src_linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(image_buffer, i);
            src_data[i]     = CVPixelBufferGetBaseAddressOfPlane(image_buffer, i);
        }
    } else {
        src_linesize[0] = CVPixelBufferGetBytesPerRow(image_buffer);
        src_data[0]     = CVPixelBufferGetBaseAddress(image_buffer);
    }

    /* av_image_copy_to_buffer() compacts per-plane strides into the tightly
     * packed layout expected in the packet (alignment 1). */
    status = av_image_copy_to_buffer(pkt->data, pkt->size,
                                     src_data, src_linesize,
                                     ctx->pixel_format, width, height, 1);

    CVPixelBufferUnlockBaseAddress(image_buffer, 0);

    return status;
}
1045 
/**
 * De-queue the most recent captured video or audio frame and return it as
 * an AVPacket.
 *
 * Loops until either a frame is available or the capture delegate has
 * signalled termination. ctx->frame_lock guards current_frame and
 * current_audio_frame; it is held only while a pending frame is inspected
 * and copied, and is released on EVERY exit path (the original code
 * returned from inside the locked region on several error paths, leaking
 * the mutex and deadlocking subsequent reads).
 *
 * @return 0 on success, AVERROR(EAGAIN) if no frame is pending,
 *         AVERROR_EOF after the device signalled quit, or a negative
 *         AVERROR code on failure.
 */
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;

    do {
        CVImageBufferRef image_buffer;
        CMBlockBufferRef block_buffer;
        lock_frames(ctx);

        if (ctx->current_frame != nil) {
            int status;
            int length = 0;

            image_buffer = CMSampleBufferGetImageBuffer(ctx->current_frame);
            block_buffer = CMSampleBufferGetDataBuffer(ctx->current_frame);

            if (image_buffer != nil) {
                length = (int)CVPixelBufferGetDataSize(image_buffer);
            } else if (block_buffer != nil) {
                length = (int)CMBlockBufferGetDataLength(block_buffer);
            } else  {
                unlock_frames(ctx);
                return AVERROR(EINVAL);
            }

            if (av_new_packet(pkt, length) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            CMItemCount count;
            CMSampleTimingInfo timing_info;

            /* Convert the buffer's presentation timestamp into the fixed
             * microsecond time base used by this demuxer. */
            if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_frame, 1, &timing_info, &count) == noErr) {
                AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
            }

            pkt->stream_index  = ctx->video_stream_index;
            pkt->flags        |= AV_PKT_FLAG_KEY;

            if (image_buffer) {
                status = copy_cvpixelbuffer(s, image_buffer, pkt);
            } else {
                /* Muxed/raw data path: copy the block buffer verbatim. */
                status = 0;
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                if (ret != kCMBlockBufferNoErr) {
                    status = AVERROR(EIO);
                }
            }
            CFRelease(ctx->current_frame);
            ctx->current_frame = nil;

            if (status < 0) {
                unlock_frames(ctx);
                return status;
            }
        } else if (ctx->current_audio_frame != nil) {
            CMBlockBufferRef block_buffer = CMSampleBufferGetDataBuffer(ctx->current_audio_frame);
            int block_buffer_size;

            /* Check for NULL before querying the length (the original code
             * called CMBlockBufferGetDataLength() on a possibly-NULL ref). */
            if (!block_buffer) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            block_buffer_size = CMBlockBufferGetDataLength(block_buffer);
            if (!block_buffer_size) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            if (ctx->audio_non_interleaved && block_buffer_size > ctx->audio_buffer_size) {
                unlock_frames(ctx);
                return AVERROR_BUFFER_TOO_SMALL;
            }

            if (av_new_packet(pkt, block_buffer_size) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }

            CMItemCount count;
            CMSampleTimingInfo timing_info;

            if (CMSampleBufferGetOutputSampleTimingInfoArray(ctx->current_audio_frame, 1, &timing_info, &count) == noErr) {
                AVRational timebase_q = av_make_q(1, timing_info.presentationTimeStamp.timescale);
                pkt->pts = pkt->dts = av_rescale_q(timing_info.presentationTimeStamp.value, timebase_q, avf_time_base_q);
            }

            pkt->stream_index  = ctx->audio_stream_index;
            pkt->flags        |= AV_PKT_FLAG_KEY;

            if (ctx->audio_non_interleaved) {
                int sample, c, shift, num_samples;

                /* Stage the planar samples in the scratch buffer, then
                 * interleave them into the packet. */
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, ctx->audio_buffer);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }

                num_samples = pkt->size / (ctx->audio_channels * (ctx->audio_bits_per_sample >> 3));

                /* Transform the decoded (planar) frame into interleaved
                 * output, left-shifting to the container sample width. */
                #define INTERLEAVE_OUTPUT(bps) \
                { \
                    int##bps##_t **src; \
                    int##bps##_t *dest; \
                    src = av_malloc(ctx->audio_channels * sizeof(int##bps##_t*)); \
                    if (!src) { \
                        unlock_frames(ctx); \
                        return AVERROR(EIO); \
                    } \
                    for (c = 0; c < ctx->audio_channels; c++) { \
                        src[c] = ((int##bps##_t*)ctx->audio_buffer) + c * num_samples; \
                    } \
                    dest  = (int##bps##_t*)pkt->data; \
                    shift = bps - ctx->audio_bits_per_sample; \
                    for (sample = 0; sample < num_samples; sample++) \
                        for (c = 0; c < ctx->audio_channels; c++) \
                            *dest++ = src[c][sample] << shift; \
                    av_freep(&src); \
                }

                if (ctx->audio_bits_per_sample <= 16) {
                    INTERLEAVE_OUTPUT(16)
                } else {
                    INTERLEAVE_OUTPUT(32)
                }
            } else {
                OSStatus ret = CMBlockBufferCopyDataBytes(block_buffer, 0, pkt->size, pkt->data);
                if (ret != kCMBlockBufferNoErr) {
                    unlock_frames(ctx);
                    return AVERROR(EIO);
                }
            }

            CFRelease(ctx->current_audio_frame);
            ctx->current_audio_frame = nil;
        } else {
            pkt->data = NULL;
            unlock_frames(ctx);
            if (ctx->observed_quit) {
                return AVERROR_EOF;
            } else {
                return AVERROR(EAGAIN);
            }
        }

        unlock_frames(ctx);
    } while (!pkt->data);

    return 0;
}
1183 
/**
 * Close the AVFoundation input device.
 *
 * Tears down the capture session and releases everything allocated in
 * avf_read_header(); without the destroy_context() call the session,
 * delegates and audio scratch buffer would leak on every close.
 *
 * @return 0 (always succeeds)
 */
static int avf_close(AVFormatContext *s)
{
    AVFContext* ctx = (AVFContext*)s->priv_data;
    destroy_context(ctx);
    return 0;
}
1190 
/* User-settable options for the AVFoundation input device. All options are
 * demuxing parameters; device selection, capture format and screen-capture
 * behaviour are configured here and read in avf_read_header(). */
static const AVOption options[] = {
    /* device discovery and selection */
    { "list_devices",        "list available devices",                                              offsetof(AVFContext, list_devices),        AV_OPT_TYPE_BOOL,       {.i64 = 0},      0,  1,       AV_OPT_FLAG_DECODING_PARAM },
    { "video_device_index",  "select video device by index for devices with same name (starts at 0)", offsetof(AVFContext, video_device_index), AV_OPT_TYPE_INT,        {.i64 = -1},     -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "audio_device_index",  "select audio device by index for devices with same name (starts at 0)", offsetof(AVFContext, audio_device_index), AV_OPT_TYPE_INT,        {.i64 = -1},     -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },

    /* capture format */
    { "pixel_format",        "set pixel format",                                                    offsetof(AVFContext, pixel_format),        AV_OPT_TYPE_PIXEL_FMT,  {.i64 = AV_PIX_FMT_YUV420P}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "framerate",           "set frame rate",                                                      offsetof(AVFContext, framerate),           AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0,  INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "video_size",          "set video size",                                                      offsetof(AVFContext, width),               AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL},   0,  0,       AV_OPT_FLAG_DECODING_PARAM },

    /* screen-capture and raw-data behaviour */
    { "capture_cursor",      "capture the screen cursor",                                           offsetof(AVFContext, capture_cursor),      AV_OPT_TYPE_BOOL,       {.i64 = 0},      0,  1,       AV_OPT_FLAG_DECODING_PARAM },
    { "capture_mouse_clicks","capture the screen mouse clicks",                                     offsetof(AVFContext, capture_mouse_clicks),AV_OPT_TYPE_BOOL,       {.i64 = 0},      0,  1,       AV_OPT_FLAG_DECODING_PARAM },
    { "capture_raw_data",    "capture the raw data from device connection",                         offsetof(AVFContext, capture_raw_data),    AV_OPT_TYPE_BOOL,       {.i64 = 0},      0,  1,       AV_OPT_FLAG_DECODING_PARAM },
    { "drop_late_frames",    "drop frames that are available later than expected",                  offsetof(AVFContext, drop_late_frames),    AV_OPT_TYPE_BOOL,       {.i64 = 1},      0,  1,       AV_OPT_FLAG_DECODING_PARAM },

    { NULL },
};
1205 
/* AVClass describing the device for the option system and device listing;
 * the category marks it as a video input device so tools such as
 * -sources/-list_devices can classify it correctly. */
static const AVClass avf_class = {
    .class_name = "AVFoundation indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
1213 
/* Demuxer registration. AVFMT_NOFILE: the "filename" names capture devices,
 * not a file, so no AVIOContext is opened by the caller. The read_header /
 * read_packet / read_close callbacks are the functions defined above. */
const AVInputFormat ff_avfoundation_demuxer = {
    .name           = "avfoundation",
    .long_name      = NULL_IF_CONFIG_SMALL("AVFoundation input device"),
    .priv_data_size = sizeof(AVFContext),
    .read_header    = avf_read_header,
    .read_packet    = avf_read_packet,
    .read_close     = avf_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &avf_class,
};
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
AV_CODEC_ID_PCM_S16LE
@ AV_CODEC_ID_PCM_S16LE
Definition: codec_id.h:313
pthread_mutex_t
_fmutex pthread_mutex_t
Definition: os2threads.h:53
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AV_CODEC_ID_PCM_F32BE
@ AV_CODEC_ID_PCM_F32BE
Definition: codec_id.h:333
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
status
they must not be accessed directly The fifo field contains the frames that are queued in the input for processing by the filter The status_in and status_out fields contains the queued status(EOF or error) of the link
AVFContext::audio_buffer_size
int audio_buffer_size
Definition: avfoundation.m:123
init
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
avformat_new_stream
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:4374
AVFContext::audio_float
int audio_float
Definition: avfoundation.m:116
AVFContext::observed_quit
int observed_quit
Definition: avfoundation.m:137
unlock_frames
static void unlock_frames(AVFContext *ctx)
Definition: avfoundation.m:145
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AV_OPT_TYPE_VIDEO_RATE
@ AV_OPT_TYPE_VIDEO_RATE
offset must point to AVRational
Definition: opt.h:237
pthread_mutex_init
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:104
parse_device_name
static void parse_device_name(AVFormatContext *s)
Definition: avfoundation.m:311
AV_CODEC_ID_RAWVIDEO
@ AV_CODEC_ID_RAWVIDEO
Definition: codec_id.h:63
AVFContext::current_audio_frame
CMSampleBufferRef current_audio_frame
Definition: avfoundation.m:131
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
pixdesc.h
index
fg index
Definition: ffmpeg_filter.c:168
AVFContext::audio_frames_captured
int audio_frames_captured
Definition: avfoundation.m:88
AVPacket::data
uint8_t * data
Definition: packet.h:373
AVOption
AVOption.
Definition: opt.h:247
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:404
AV_PIX_FMT_RGB555BE
@ AV_PIX_FMT_RGB555BE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined
Definition: pixfmt.h:107
AVFContext::audio_channels
int audio_channels
Definition: avfoundation.m:114
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVFContext::video_filename
char * video_filename
Definition: avfoundation.m:109
AVFPixelFormatSpec::avf_id
OSType avf_id
Definition: avfoundation.m:51
AVFContext::audio_be
int audio_be
Definition: avfoundation.m:117
framerate
int framerate
Definition: h264_levels.c:65
AVFContext::capture_cursor
int capture_cursor
Definition: avfoundation.m:96
AV_CODEC_ID_PCM_S16BE
@ AV_CODEC_ID_PCM_S16BE
Definition: codec_id.h:314
fail
#define fail()
Definition: checkasm.h:136
avf_close
static int avf_close(AVFormatContext *s)
Definition: avfoundation.m:1184
avf_time_base
static const int avf_time_base
Definition: avfoundation.m:42
read_close
static av_cold int read_close(AVFormatContext *ctx)
Definition: libcdio.c:141
AVFContext::current_frame
CMSampleBufferRef current_frame
Definition: avfoundation.m:130
AVFPixelFormatSpec::ff_id
enum AVPixelFormat ff_id
Definition: avfoundation.m:50
AVFContext::observed_device
AVCaptureDevice * observed_device
Definition: avfoundation.m:133
AVERROR_BUFFER_TOO_SMALL
#define AVERROR_BUFFER_TOO_SMALL
Buffer too small.
Definition: error.h:51
AVRational::num
int num
Numerator.
Definition: rational.h:59
AVFContext::framerate
AVRational framerate
Definition: avfoundation.m:93
AV_PIX_FMT_YUV444P10
#define AV_PIX_FMT_YUV444P10
Definition: pixfmt.h:392
description
Tag description
Definition: snow.txt:206
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
avf_time_base_q
static const AVRational avf_time_base_q
Definition: avfoundation.m:44
AVInputFormat
Definition: avformat.h:626
AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUV422P16
Definition: pixfmt.h:401
AVFContext::num_video_devices
int num_video_devices
Definition: avfoundation.m:112
INTERLEAVE_OUTPUT
#define INTERLEAVE_OUTPUT(bps)
width
#define width
s
#define s(width, name)
Definition: cbs_vp9.c:257
av_new_packet
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:94
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:631
AVFAudioReceiver::_context
AVFContext * _context
Definition: avfoundation.m:246
options
static const AVOption options[]
Definition: avfoundation.m:1191
add_audio_device
static int add_audio_device(AVFormatContext *s, AVCaptureDevice *audio_device)
Definition: avfoundation.m:579
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
av_strtok
char * av_strtok(char *s, const char *delim, char **saveptr)
Split the string into several tokens which can be accessed by successive calls to av_strtok().
Definition: avstring.c:186
AVFContext::capture_mouse_clicks
int capture_mouse_clicks
Definition: avfoundation.m:97
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
ctx
AVFormatContext * ctx
Definition: movenc.c:48
AVFContext::frame_lock
pthread_mutex_t frame_lock
Definition: avfoundation.m:89
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
AVFContext::capture_raw_data
int capture_raw_data
Definition: avfoundation.m:98
AVFContext::list_devices
int list_devices
Definition: avfoundation.m:103
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
AVFPixelFormatSpec
Definition: avfoundation.m:49
get_video_config
static int get_video_config(AVFormatContext *s)
Definition: avfoundation.m:623
if
if(ret)
Definition: filter_design.txt:179
AVFContext::audio_packed
int audio_packed
Definition: avfoundation.m:119
AVFFrameReceiver::_context
AVFContext * _context
Definition: avfoundation.m:154
context
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option keep it simple and lowercase description are in without and describe what they for example set the foo of the bar offset is the offset of the field in your context
Definition: writing_filters.txt:91
AVFormatContext
Format I/O context.
Definition: avformat.h:1107
internal.h
AVFContext::video_output
AVCaptureVideoDataOutput * video_output
Definition: avfoundation.m:128
AVFContext::audio_signed_integer
int audio_signed_integer
Definition: avfoundation.m:118
AV_PIX_FMT_RGB565LE
@ AV_PIX_FMT_RGB565LE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
Definition: pixfmt.h:106
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
read_header
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:527
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
NULL
#define NULL
Definition: coverity.c:32
AVFContext::drop_late_frames
int drop_late_frames
Definition: avfoundation.m:99
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
add_video_device
static int add_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
Definition: avfoundation.m:432
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFFrameReceiver
FrameReceiver class - delegate for AVCaptureSession.
Definition: avfoundation.m:152
AV_PIX_FMT_MONOBLACK
@ AV_PIX_FMT_MONOBLACK
Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb.
Definition: pixfmt.h:76
AV_OPT_TYPE_IMAGE_SIZE
@ AV_OPT_TYPE_IMAGE_SIZE
offset must point to two consecutive integers
Definition: opt.h:234
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
parseutils.h
AV_PIX_FMT_BGR0
@ AV_PIX_FMT_BGR0
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
Definition: pixfmt.h:230
time.h
AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
@ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
Definition: log.h:41
AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV422P10
Definition: pixfmt.h:390
AV_PIX_FMT_GRAY8
@ AV_PIX_FMT_GRAY8
Y , 8bpp.
Definition: pixfmt.h:74
avf_read_packet
static int avf_read_packet(AVFormatContext *s, AVPacket *pkt)
Definition: avfoundation.m:1046
AVFContext::width
int width
Definition: avfoundation.m:94
configure_video_device
static int configure_video_device(AVFormatContext *s, AVCaptureDevice *video_device)
Configure the video device.
Definition: avfoundation.m:335
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
pthread_mutex_unlock
#define pthread_mutex_unlock(a)
Definition: ffprobe.c:68
AVFContext::audio_buffer
int32_t * audio_buffer
Definition: avfoundation.m:122
AVFContext::video_stream_index
int video_stream_index
Definition: avfoundation.m:105
AV_CODEC_ID_PCM_S24LE
@ AV_CODEC_ID_PCM_S24LE
Definition: codec_id.h:325
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVPacket::size
int size
Definition: packet.h:374
NULL_IF_CONFIG_SMALL
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:116
id
enum AVCodecID id
Definition: extract_extradata_bsf.c:325
destroy_context
static void destroy_context(AVFContext *ctx)
Definition: avfoundation.m:286
avpriv_set_pts_info
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: utils.c:4799
sample
#define sample
Definition: flacdsp_template.c:44
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
ff_avfoundation_demuxer
const AVInputFormat ff_avfoundation_demuxer
Definition: avfoundation.m:1214
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:463
AVFContext::audio_non_interleaved
int audio_non_interleaved
Definition: avfoundation.m:120
avdevice.h
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
height
#define height
AV_PIX_FMT_YUVA444P
@ AV_PIX_FMT_YUVA444P
planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
Definition: pixfmt.h:167
AVERROR_EXTERNAL
#define AVERROR_EXTERNAL
Generic error in an external library.
Definition: error.h:57
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
AV_PIX_FMT_RGB0
@ AV_PIX_FMT_RGB0
packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
Definition: pixfmt.h:228
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
pthread_mutex_destroy
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:112
lock_frames
static void lock_frames(AVFContext *ctx)
Definition: avfoundation.m:140
AVFContext::audio_stream_index
int audio_stream_index
Definition: avfoundation.m:107
copy_cvpixelbuffer
static int copy_cvpixelbuffer(AVFormatContext *s, CVPixelBufferRef image_buffer, AVPacket *pkt)
Definition: avfoundation.m:1003
AV_PIX_FMT_RGB555LE
@ AV_PIX_FMT_RGB555LE
packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
Definition: pixfmt.h:108
AVFContext::audio_bits_per_sample
int audio_bits_per_sample
Definition: avfoundation.m:115
i
int i
Definition: input.c:406
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
avf_read_header
static int avf_read_header(AVFormatContext *s)
Definition: avfoundation.m:756
internal.h
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:278
AV_CODEC_ID_DVVIDEO
@ AV_CODEC_ID_DVVIDEO
Definition: codec_id.h:74
AV_CODEC_ID_PCM_S32BE
@ AV_CODEC_ID_PCM_S32BE
Definition: codec_id.h:322
AVFContext::frames_captured
int frames_captured
Definition: avfoundation.m:87
AVFContext::video_is_muxed
int video_is_muxed
Definition: avfoundation.m:100
ret
ret
Definition: filter_design.txt:187
read_packet
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
Definition: avio_reading.c:42
AVStream
Stream structure.
Definition: avformat.h:847
AV_PIX_FMT_0BGR
@ AV_PIX_FMT_0BGR
packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
Definition: pixfmt.h:229
AV_PIX_FMT_NV12
@ AV_PIX_FMT_NV12
planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (firs...
Definition: pixfmt.h:89
AVFContext::audio_device_index
int audio_device_index
Definition: avfoundation.m:106
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
avf_pixel_formats
static const struct AVFPixelFormatSpec avf_pixel_formats[]
Definition: avfoundation.m:54
AVFContext::audio_output
AVCaptureAudioDataOutput * audio_output
Definition: avfoundation.m:129
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
AVFContext::avf_audio_delegate
id avf_audio_delegate
Definition: avfoundation.m:91
channel_layout.h
AVFContext::video_is_screen
int video_is_screen
Definition: avfoundation.m:101
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
AV_OPT_TYPE_INT
@ AV_OPT_TYPE_INT
Definition: opt.h:224
AV_OPT_TYPE_PIXEL_FMT
@ AV_OPT_TYPE_PIXEL_FMT
Definition: opt.h:235
AVPacket::stream_index
int stream_index
Definition: packet.h:375
AV_PIX_FMT_YUV444P
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
AVFContext::audio_filename
char * audio_filename
Definition: avfoundation.m:110
AV_PIX_FMT_RGB565BE
@ AV_PIX_FMT_RGB565BE
packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
Definition: pixfmt.h:105
shift
static int shift(int a, int b)
Definition: sonic.c:83
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:259
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AV_CODEC_ID_PCM_S32LE
@ AV_CODEC_ID_PCM_S32LE
Definition: codec_id.h:321
get_audio_config
static int get_audio_config(AVFormatContext *s)
Definition: avfoundation.m:671
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:231
AVFContext
Definition: avfoundation.m:83
timing_info
static int FUNC() timing_info(CodedBitstreamContext *ctx, RWContext *rw, AV1RawTimingInfo *current)
Definition: cbs_av1_syntax_template.c:158
av_image_copy_to_buffer
int av_image_copy_to_buffer(uint8_t *dst, int dst_size, const uint8_t *const src_data[4], const int src_linesize[4], enum AVPixelFormat pix_fmt, int width, int height, int align)
Copy image data from an image into a buffer.
Definition: imgutils.c:501
AVPacket
This structure stores compressed data.
Definition: packet.h:350
AV_OPT_TYPE_BOOL
@ AV_OPT_TYPE_BOOL
Definition: opt.h:241
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
format
fg outputs[0] format
Definition: ffmpeg_filter.c:175
int32_t
int32_t
Definition: audioconvert.c:56
imgutils.h
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
AVFContext::video_device_index
int video_device_index
Definition: avfoundation.m:104
AV_PIX_FMT_0RGB
@ AV_PIX_FMT_0RGB
packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
Definition: pixfmt.h:227
AV_CODEC_ID_PCM_F32LE
@ AV_CODEC_ID_PCM_F32LE
Definition: codec_id.h:334
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVFAudioReceiver
AudioReceiver class - delegate for AVCaptureSession.
Definition: avfoundation.m:244
avstring.h
AVFContext::avf_delegate
id avf_delegate
Definition: avfoundation.m:90
AV_PIX_FMT_YUVA444P16LE
@ AV_PIX_FMT_YUVA444P16LE
planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
Definition: pixfmt.h:185
avf_class
static const AVClass avf_class
Definition: avfoundation.m:1206
int
int
Definition: ffmpeg_filter.c:156
AVFContext::capture_session
AVCaptureSession * capture_session
Definition: avfoundation.m:127
AV_CODEC_ID_PCM_S24BE
@ AV_CODEC_ID_PCM_S24BE
Definition: codec_id.h:326
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2460
AV_PIX_FMT_BGR48BE
@ AV_PIX_FMT_BGR48BE
packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big...
Definition: pixfmt.h:138
pthread_mutex_lock
#define pthread_mutex_lock(a)
Definition: ffprobe.c:64