FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
qtkit.m
Go to the documentation of this file.
1 /*
2  * QTKit input device
3  * Copyright (c) 2013 Vadim Kalinsky <vadim@kalinsky.ru>
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * QTKit input device
25  * @author Vadim Kalinsky <vadim@kalinsky.ru>
26  */
27 
28 #if defined(__clang__)
29 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
30 #endif
31 
32 #import <QTKit/QTKit.h>
33 #include <pthread.h>
34 
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/opt.h"
37 #include "libavformat/internal.h"
38 #include "libavutil/internal.h"
39 #include "libavutil/time.h"
40 #include "avdevice.h"
41 
42 #define QTKIT_TIMEBASE 100
43 
44 static const AVRational kQTKitTimeBase_q = {
45  .num = 1,
46  .den = QTKIT_TIMEBASE
47 };
48 
49 typedef struct
50 {
51  AVClass* class;
52 
53  float frame_rate;
55  int64_t first_pts;
59 
62 
63  QTCaptureSession* capture_session;
64  QTCaptureDecompressedVideoOutput* video_output;
65  CVImageBufferRef current_frame;
67 
69 {
71 }
72 
74 {
76 }
77 
78 /** FrameReceiver class - delegate for QTCaptureSession
79  */
80 @interface FFMPEG_FrameReceiver : NSObject
81 {
83 }
84 
85 - (id)initWithContext:(CaptureContext*)context;
86 
87 - (void)captureOutput:(QTCaptureOutput *)captureOutput
88  didOutputVideoFrame:(CVImageBufferRef)videoFrame
89  withSampleBuffer:(QTSampleBuffer *)sampleBuffer
90  fromConnection:(QTCaptureConnection *)connection;
91 
92 @end
93 
94 @implementation FFMPEG_FrameReceiver
95 
/**
 * Designated initializer: keeps a (non-owning) pointer to the capture
 * context so arriving frames can be handed back to the demuxer.
 */
- (id)initWithContext:(CaptureContext*)context
{
    self = [super init];
    if (self != nil) {
        _context = context;
    }
    return self;
}
103 
104 - (void)captureOutput:(QTCaptureOutput *)captureOutput
105  didOutputVideoFrame:(CVImageBufferRef)videoFrame
106  withSampleBuffer:(QTSampleBuffer *)sampleBuffer
107  fromConnection:(QTCaptureConnection *)connection
108 {
110  if (_context->current_frame != nil) {
111  CVBufferRelease(_context->current_frame);
112  }
113 
114  _context->current_frame = CVBufferRetain(videoFrame);
115 
117 
119 
121 }
122 
123 @end
124 
126 {
127  [ctx->capture_session stopRunning];
128 
129  [ctx->capture_session release];
130  [ctx->video_output release];
131  [ctx->qt_delegate release];
132 
133  ctx->capture_session = NULL;
134  ctx->video_output = NULL;
135  ctx->qt_delegate = NULL;
136 
139 
140  if (ctx->current_frame)
141  CVBufferRelease(ctx->current_frame);
142 }
143 
145 {
146  NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
147 
149 
150  ctx->first_pts = av_gettime();
151 
154 
155  // List devices if requested
156  if (ctx->list_devices) {
157  av_log(ctx, AV_LOG_INFO, "QTKit video devices:\n");
158  NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
159  for (QTCaptureDevice *device in devices) {
160  const char *name = [[device localizedDisplayName] UTF8String];
161  int index = [devices indexOfObject:device];
162  av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
163  }
164  goto fail;
165  }
166 
167  // Find capture device
168  QTCaptureDevice *video_device = nil;
169 
170  // check for device index given in filename
171  if (ctx->video_device_index == -1) {
172  sscanf(s->filename, "%d", &ctx->video_device_index);
173  }
174 
175  if (ctx->video_device_index >= 0) {
176  NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
177 
178  if (ctx->video_device_index >= [devices count]) {
179  av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
180  goto fail;
181  }
182 
183  video_device = [devices objectAtIndex:ctx->video_device_index];
184  } else if (strncmp(s->filename, "", 1) &&
185  strncmp(s->filename, "default", 7)) {
186  NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
187 
188  for (QTCaptureDevice *device in devices) {
189  if (!strncmp(s->filename, [[device localizedDisplayName] UTF8String], strlen(s->filename))) {
190  video_device = device;
191  break;
192  }
193  }
194  if (!video_device) {
195  av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
196  goto fail;
197  }
198  } else {
199  video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeMuxed];
200  }
201 
202  BOOL success = [video_device open:nil];
203 
204  // Video capture device not found, looking for QTMediaTypeVideo
205  if (!success) {
206  video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeVideo];
207  success = [video_device open:nil];
208 
209  if (!success) {
210  av_log(s, AV_LOG_ERROR, "No QT capture device found\n");
211  goto fail;
212  }
213  }
214 
215  NSString* dev_display_name = [video_device localizedDisplayName];
216  av_log (s, AV_LOG_DEBUG, "'%s' opened\n", [dev_display_name UTF8String]);
217 
218  // Initialize capture session
219  ctx->capture_session = [[QTCaptureSession alloc] init];
220 
221  QTCaptureDeviceInput* capture_dev_input = [[[QTCaptureDeviceInput alloc] initWithDevice:video_device] autorelease];
222  success = [ctx->capture_session addInput:capture_dev_input error:nil];
223 
224  if (!success) {
225  av_log (s, AV_LOG_ERROR, "Failed to add QT capture device to session\n");
226  goto fail;
227  }
228 
229  // Attaching output
230  // FIXME: Allow for a user defined pixel format
231  ctx->video_output = [[QTCaptureDecompressedVideoOutput alloc] init];
232 
233  NSDictionary *captureDictionary = [NSDictionary dictionaryWithObject:
234  [NSNumber numberWithUnsignedInt:kCVPixelFormatType_24RGB]
235  forKey:(id)kCVPixelBufferPixelFormatTypeKey];
236 
237  [ctx->video_output setPixelBufferAttributes:captureDictionary];
238 
239  ctx->qt_delegate = [[FFMPEG_FrameReceiver alloc] initWithContext:ctx];
240 
241  [ctx->video_output setDelegate:ctx->qt_delegate];
242  [ctx->video_output setAutomaticallyDropsLateVideoFrames:YES];
243  [ctx->video_output setMinimumVideoFrameInterval:1.0/ctx->frame_rate];
244 
245  success = [ctx->capture_session addOutput:ctx->video_output error:nil];
246 
247  if (!success) {
248  av_log (s, AV_LOG_ERROR, "can't add video output to capture session\n");
249  goto fail;
250  }
251 
252  [ctx->capture_session startRunning];
253 
254  // Take stream info from the first frame.
255  while (ctx->frames_captured < 1) {
256  CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
257  }
258 
259  lock_frames(ctx);
260 
261  AVStream* stream = avformat_new_stream(s, NULL);
262 
263  if (!stream) {
264  goto fail;
265  }
266 
267  avpriv_set_pts_info(stream, 64, 1, QTKIT_TIMEBASE);
268 
269  stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
270  stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
271  stream->codec->width = (int)CVPixelBufferGetWidth (ctx->current_frame);
272  stream->codec->height = (int)CVPixelBufferGetHeight(ctx->current_frame);
273  stream->codec->pix_fmt = AV_PIX_FMT_RGB24;
274 
275  CVBufferRelease(ctx->current_frame);
276  ctx->current_frame = nil;
277 
278  unlock_frames(ctx);
279 
280  [pool release];
281 
282  return 0;
283 
284 fail:
285  [pool release];
286 
287  destroy_context(ctx);
288 
289  return AVERROR(EIO);
290 }
291 
293 {
295 
296  do {
297  lock_frames(ctx);
298 
299  if (ctx->current_frame != nil) {
300  if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(ctx->current_frame)) < 0) {
301  return AVERROR(EIO);
302  }
303 
304  pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts, AV_TIME_BASE_Q, kQTKitTimeBase_q);
305  pkt->stream_index = 0;
306  pkt->flags |= AV_PKT_FLAG_KEY;
307 
308  CVPixelBufferLockBaseAddress(ctx->current_frame, 0);
309 
310  void* data = CVPixelBufferGetBaseAddress(ctx->current_frame);
311  memcpy(pkt->data, data, pkt->size);
312 
313  CVPixelBufferUnlockBaseAddress(ctx->current_frame, 0);
314  CVBufferRelease(ctx->current_frame);
315  ctx->current_frame = nil;
316  } else {
317  pkt->data = NULL;
319  }
320 
321  unlock_frames(ctx);
322  } while (!pkt->data);
323 
324  return 0;
325 }
326 
328 {
330 
331  destroy_context(ctx);
332 
333  return 0;
334 }
335 
336 static const AVOption options[] = {
337  { "frame_rate", "set frame rate", offsetof(CaptureContext, frame_rate), AV_OPT_TYPE_FLOAT, { .dbl = 30.0 }, 0.1, 30.0, AV_OPT_TYPE_VIDEO_RATE, NULL },
338  { "list_devices", "list available devices", offsetof(CaptureContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
339  { "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
340  { "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
341  { "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(CaptureContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
342  { NULL },
343 };
344 
345 static const AVClass qtkit_class = {
346  .class_name = "QTKit input device",
347  .item_name = av_default_item_name,
348  .option = options,
349  .version = LIBAVUTIL_VERSION_INT,
351 };
352 
354  .name = "qtkit",
355  .long_name = NULL_IF_CONFIG_SMALL("QTKit input device"),
356  .priv_data_size = sizeof(CaptureContext),
360  .flags = AVFMT_NOFILE,
361  .priv_class = &qtkit_class,
362 };
uint32_t BOOL
#define NULL
Definition: coverity.c:32
const char * s
Definition: avisynth_c.h:631
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
Definition: os2threads.h:106
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
Definition: os2threads.h:164
AVOption.
Definition: opt.h:245
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
enum AVCodecID id
Definition: mxfenc.c:104
#define LIBAVUTIL_VERSION_INT
Definition: version.h:70
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:64
void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits, unsigned int pts_num, unsigned int pts_den)
Set the time base and wrapping info for a given stream.
Definition: utils.c:4427
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
int frames_captured
Definition: qtkit.m:54
int num
numerator
Definition: rational.h:44
int size
Definition: avcodec.h:1581
pthread_mutex_t frame_lock
Definition: qtkit.m:56
static AVPacket pkt
static void lock_frames(CaptureContext *ctx)
Definition: qtkit.m:68
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
Definition: os2threads.h:138
Format I/O context.
Definition: avformat.h:1325
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
int list_devices
Definition: qtkit.m:60
HMTX pthread_mutex_t
Definition: os2threads.h:49
AVOptions.
static int qtkit_read_header(AVFormatContext *s)
Definition: qtkit.m:144
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:4065
CaptureContext * _context
Definition: qtkit.m:82
uint8_t * data
Definition: avcodec.h:1580
static av_cold int read_close(AVFormatContext *ctx)
Definition: libcdio.c:145
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
Definition: os2threads.h:146
#define av_log(a,...)
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1612
static const AVOption options[]
Definition: qtkit.m:336
id qt_delegate
Definition: qtkit.m:58
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
Main libavdevice API header.
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_default_item_name
#define AVERROR(e)
Definition: error.h:43
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:176
FrameReceiver class - delegate for QTCaptureSession.
Definition: qtkit.m:80
static int qtkit_close(AVFormatContext *s)
Definition: qtkit.m:327
#define QTKIT_TIMEBASE
Definition: qtkit.m:42
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static const AVClass qtkit_class
Definition: qtkit.m:345
GLsizei count
Definition: opengl_enc.c:109
#define fail()
Definition: checkasm.h:81
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1586
static int qtkit_read_packet(AVFormatContext *s, AVPacket *pkt)
Definition: qtkit.m:292
int64_t first_pts
Definition: qtkit.m:55
common internal API header
pthread_cond_t frame_wait_cond
Definition: qtkit.m:57
char filename[1024]
input or output filename
Definition: avformat.h:1401
typedef void(APIENTRY *FF_PFNGLACTIVETEXTUREPROC)(GLenum texture)
static void destroy_context(CaptureContext *ctx)
Definition: qtkit.m:125
AVFormatContext * ctx
Definition: movenc.c:48
QTCaptureSession * capture_session
Definition: qtkit.m:63
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
Definition: os2threads.h:98
CVImageBufferRef current_frame
Definition: qtkit.m:65
static int read_header(FFV1Context *f)
Definition: ffv1dec.c:638
int64_t av_gettime(void)
Get the current time in microseconds.
Definition: time.c:39
Stream structure.
Definition: avformat.h:876
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
Definition: avio_reading.c:42
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
Describe the class of an AVClass context structure.
Definition: log.h:67
int index
Definition: gxfenc.c:89
rational number numerator/denominator
Definition: rational.h:43
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:276
float frame_rate
Definition: qtkit.m:53
offset must point to AVRational
Definition: opt.h:235
static int flags
Definition: cpu.c:47
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:476
static void unlock_frames(CaptureContext *ctx)
Definition: qtkit.m:73
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
Definition: os2threads.h:127
AVInputFormat ff_qtkit_demuxer
Definition: qtkit.m:353
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
Definition: os2threads.h:120
void * priv_data
Format private data.
Definition: avformat.h:1353
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1579
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:660
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
Definition: os2threads.h:113
int video_device_index
Definition: qtkit.m:61
int stream_index
Definition: avcodec.h:1582
QTCaptureDecompressedVideoOutput * video_output
Definition: qtkit.m:64
This structure stores compressed data.
Definition: avcodec.h:1557
static const AVRational kQTKitTimeBase_q
Definition: qtkit.m:44
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1573
const char * name
Definition: opengl_enc.c:103