Go to the documentation of this file.
26 #import <CoreImage/CoreImage.h>
27 #import <AppKit/AppKit.h>
99 NSArray *filter_categories = nil;
101 if (
ctx->list_generators && !
ctx->list_filters) {
102 filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
105 for (NSString *filter_name in [CIFilter filterNamesInCategories:filter_categories]) {
106 CIFilter *
filter = [CIFilter filterWithName:filter_name];
107 NSDictionary<NSString *, id> *filter_attribs = [filter attributes];
112 NSDictionary *input_attribs = [filter_attribs valueForKey:input];
113 NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
114 if ([input_class isEqualToString:
@"NSNumber"]) {
115 NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
116 NSNumber *value_min = [input_attribs valueForKey:kCIAttributeSliderMin];
117 NSNumber *value_max = [input_attribs valueForKey:kCIAttributeSliderMax];
121 [input_class UTF8String],
122 [[value_min stringValue] UTF8String],
123 [[value_max stringValue] UTF8String],
124 [[value_default stringValue] UTF8String]);
128 [input_class UTF8String]);
144 NSData *
data = [NSData dataWithBytesNoCopy:frame->data[0]
145 length:frame->height*frame->linesize[0]
148 CIImage *
ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
149 bytesPerRow:frame->linesize[0]
151 format:kCIFormatARGB8
152 colorSpace:ctx->color_space];
159 CIImage *filter_input = (__bridge CIImage*)
ctx->input_image;
160 CIImage *filter_output =
NULL;
166 filter_input = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
167 CGRect out_rect = [filter_input extent];
168 if (out_rect.size.width >
frame->width || out_rect.size.height >
frame->height) {
170 out_rect.origin.x = 0.0f;
171 out_rect.origin.y = 0.0f;
172 out_rect.size.width =
frame->width;
173 out_rect.size.height =
frame->height;
175 filter_input = [filter_input imageByCroppingToRect:out_rect];
181 if (!
ctx->is_video_source ||
i) {
183 [filter setValue:filter_input forKey:kCIInputImageKey];
184 }
@catch (NSException *exception) {
185 if (![[exception
name] isEqualToString:NSUndefinedKeyException]) {
196 filter_output = [filter valueForKey:kCIOutputImageKey];
198 if (!filter_output) {
204 CGRect out_rect = [filter_output extent];
205 if (out_rect.size.width >
frame->width || out_rect.size.height >
frame->height) {
207 out_rect.origin.x = 0.0f;
208 out_rect.origin.y = 0.0f;
209 out_rect.size.width =
frame->width;
210 out_rect.size.height =
frame->height;
213 CGImageRef
out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
222 CGContextRelease(
ctx->cgctx);
225 size_t out_width = CGImageGetWidth(
out);
226 size_t out_height = CGImageGetHeight(
out);
228 if (out_width >
frame->width || out_height >
frame->height) {
229 av_log(
ctx,
AV_LOG_WARNING,
"Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
230 out_width, out_height,
frame->width,
frame->height);
232 ctx->cgctx = CGBitmapContextCreate(
frame->data[0],
235 ctx->bits_per_component,
238 (uint32_t)kCGImageAlphaPremultipliedFirst);
246 if (
ctx->output_rect) {
248 NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
249 NSRect
tmp = NSRectFromString(tmp_string);
251 }
@catch (NSException *exception) {
255 if (
rect.size.width == 0.0f) {
258 if (
rect.size.height == 0.0f) {
305 frame->key_frame = 1;
307 #if FF_API_INTERLACED_FRAME
308 frame->interlaced_frame = 0;
313 frame->sample_aspect_ratio =
ctx->sar;
324 NSString *input_key = [NSString stringWithUTF8String:key];
325 NSString *input_val = [NSString stringWithUTF8String:value];
327 NSDictionary *filter_attribs = [filter attributes];
328 NSDictionary *input_attribs = [filter_attribs valueForKey:input_key];
330 NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
331 NSString *input_type = [input_attribs valueForKey:kCIAttributeType];
333 if (!input_attribs) {
335 [input_key UTF8String]);
340 [input_key UTF8String],
341 [input_val UTF8String],
342 input_attribs ? (
unsigned long)[input_attribs count] : -1,
343 [input_class UTF8String],
344 [input_type UTF8String]);
346 if ([input_class isEqualToString:
@"NSNumber"]) {
347 float input = input_val.floatValue;
348 NSNumber *max_value = [input_attribs valueForKey:kCIAttributeSliderMax];
349 NSNumber *min_value = [input_attribs valueForKey:kCIAttributeSliderMin];
350 NSNumber *used_value = nil;
352 #define CLAMP_WARNING do { \
353 av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n", \
355 [input_key UTF8String], \
356 min_value.floatValue, \
357 max_value.floatValue, \
358 used_value.floatValue); \
360 if (
input > max_value.floatValue) {
361 used_value = max_value;
363 }
else if (
input < min_value.floatValue) {
364 used_value = min_value;
367 used_value = [NSNumber numberWithFloat:input];
370 [filter setValue:used_value forKey:input_key];
371 }
else if ([input_class isEqualToString:
@"CIVector"]) {
372 CIVector *
input = [CIVector vectorWithString:input_val];
376 [input_val UTF8String]);
380 [filter setValue:input forKey:input_key];
381 }
else if ([input_class isEqualToString:
@"CIColor"]) {
382 CIColor *
input = [CIColor colorWithString:input_val];
386 [input_val UTF8String]);
390 [filter setValue:input forKey:input_key];
391 }
else if ([input_class isEqualToString:
@"NSString"]) {
392 [filter setValue:input_val forKey:input_key];
393 }
else if ([input_class isEqualToString:
@"NSData"]) {
394 NSData *
input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
395 length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];
399 [input_val UTF8String]);
403 [filter setValue:input forKey:input_key];
406 [input_class UTF8String]);
418 CIFilter *
filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];
421 [filter setDefaults];
424 if (filter_options) {
443 if (
ctx->list_filters ||
ctx->list_generators) {
448 if (
ctx->filter_string) {
472 if (strncmp(
f->value,
"default", 7)) {
483 if (!filter_options) {
493 if (!
ctx->filters[
i]) {
506 const NSOpenGLPixelFormatAttribute attr[] = {
507 NSOpenGLPFAAccelerated,
508 NSOpenGLPFANoRecovery,
509 NSOpenGLPFAColorSize, 32,
513 NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];
514 ctx->color_space = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
515 ctx->glctx = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
516 pixelFormat:[pixel_format CGLPixelFormatObj]
517 colorSpace:
ctx->color_space
526 ctx->input_image = CFBridgingRetain([CIImage emptyImage]);
535 ctx->is_video_source = 1;
544 #define SafeCFRelease(ptr) do { \
559 for (
int i = 0;
i <
ctx->num_filters;
i++) {
593 #define OFFSET(x) offsetof(CoreImageContext, x)
594 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
596 #define GENERATOR_OPTIONS \
597 {"size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
598 {"s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
599 {"rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
600 {"r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
601 {"duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
602 {"d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
603 {"sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS},
605 #define FILTER_OPTIONS \
606 {"list_filters", "list available filters", OFFSET(list_filters), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
607 {"list_generators", "list available generators", OFFSET(list_generators), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
608 {"filter", "names and options of filters to apply", OFFSET(filter_string), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS}, \
609 {"output_rect", "output rectangle within output image", OFFSET(output_rect), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS},
626 .priv_class = &coreimage_class,
642 .
name =
"coreimagesrc",
647 .priv_class = &coreimagesrc_class,
const AVFilter ff_vsrc_coreimagesrc
AVFrame * ff_get_video_buffer(AVFilterLink *link, int w, int h)
Request a picture buffer with a specific set of permissions.
CFTypeRef glctx
OpenGL context.
#define FF_ENABLE_DEPRECATION_WARNINGS
#define AV_LOG_WARNING
Something somehow does not look correct.
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf default minimum maximum flags name is the option name
int64_t duration
duration expressed in microseconds
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
CGContextRef cgctx
Bitmap context for image copy.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
Send a frame of data to the next filter.
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
#define AVERROR_EOF
End of file.
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
AVFILTER_DEFINE_CLASS(coreimage)
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define FILTER_INPUTS(array)
This structure describes decoded (raw) audio or video data.
static CIFilter * create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
Create a filter object by a given name and set all options to defaults.
CGColorSpaceRef color_space
Common color space for input image and cgcontext.
static const AVFilterPad vf_coreimage_outputs[]
void(* filter)(uint8_t *src, int stride, int qscale)
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc)
Return the number of bits per pixel used by the pixel format described by pixdesc.
const char * name
Filter name.
static const AVOption coreimage_options[]
A link between two filters.
Link properties exposed to filter code, but not external callers.
const AVFilter ff_vf_coreimage
static int filter_frame(AVFilterLink *link, AVFrame *frame)
Apply all valid filters successively to the input image.
void * priv
private data for use by the filter
static const AVFilterPad vsrc_coreimagesrc_outputs[]
static int request_frame(AVFilterLink *link)
static av_cold int init(AVFilterContext *fctx)
A filter pad used for either input or output.
static int config_output(AVFilterLink *link)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
AVFrame * picref
cached reference containing the painted picture
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
AVRational time_base
stream time base
#define FILTER_OUTPUTS(array)
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a link
static int config_input(AVFilterLink *link)
Determine image properties from input link of filter chain.
int av_log_get_level(void)
Get the current log level.
Describe the class of an AVClass context structure.
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Rational number (pair of numerator and denominator).
CFTypeRef * filters
CIFilter object for all requested filters.
int bits_per_component
Shared bpc for input-output operation.
@ AV_PICTURE_TYPE_I
Intra.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several inputs
static FilterLink * ff_filter_link(AVFilterLink *link)
int64_t pts
increasing presentation time stamp
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
AVRational sar
sample aspect ratio
CFTypeRef input_image
Input image container for passing into Core Image API.
void avpriv_report_missing_feature(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message about a missing feature.
AVRational time_base
Time base for the timestamps in this frame.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
#define AVERROR_EXTERNAL
Generic error in an external library.
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
and forward the test the status of outputs and forward it to the corresponding return FFERROR_NOT_READY If the filters stores internally one or a few frame for some input
#define AV_LOG_INFO
Standard information.
@ AV_PIX_FMT_ARGB
packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
static void list_filters(CoreImageContext *ctx)
Print a list of all available filters including options and respective value ranges and defaults.
#define i(width, name, range_min, range_max)
static const AVOption coreimagesrc_options[]
it's the only field you need to keep assuming you have a context There is some magic you don't need to care about around this just let it vf default value
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
char * filter_string
The complete user provided filter definition.
const char * name
Pad name.
static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
Set an option of the given filter to the provided key-value pair.
#define AV_FRAME_FLAG_INTERLACED
A flag to mark frames whose content is interlaced.
void * av_calloc(size_t nmemb, size_t size)
AVRational frame_rate
video frame rate
char * output_rect
Rectangle to be filled with filter input.
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter's request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
int num_filters
Amount of filters in *filters.
int list_generators
Option used to list all available generators.
static av_cold int init_src(AVFilterContext *fctx)
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
int is_video_source
filter is used as video source
static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
#define FF_DISABLE_DEPRECATION_WARNINGS
static av_cold void uninit(AVFilterContext *fctx)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
static const AVFilterPad vf_coreimage_inputs[]
#define GENERATOR_OPTIONS
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
AVRational frame_rate
Frame rate of the stream on the link, or 1/0 if unknown or variable.
#define SafeCFRelease(ptr)
#define FILTER_SINGLE_PIXFMT(pix_fmt_)
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
int list_filters
Option used to list all available filters including generators.