vf_coreimage.m
1 /*
2  * Copyright (c) 2016 Thilo Borgmann
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * Video processing based on Apple's CoreImage API
24  */
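/*
 * Minimal usage sketch (illustrative only; filter and generator names such as
 * CIBoxBlur or CIConstantColorGenerator are Core Image built-ins chosen as
 * examples, and shell quoting of the filter string may be required):
 *
 *   ffmpeg -i in.mov -vf coreimage=filter=CIBoxBlur@default out.mov
 *   ffmpeg -f lavfi -i coreimagesrc=s=320x240:filter=CIConstantColorGenerator@default -frames:v 1 out.png
 *
 * The accepted keys are defined by the option tables at the end of this file
 * (filter, output_rect, list_filters, list_generators, plus size/rate/duration/sar
 * for the source).
 */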
25 
26 #import <QuartzCore/CoreImage.h>
27 #import <AppKit/AppKit.h>
28 
29 #include "avfilter.h"
30 #include "formats.h"
31 #include "internal.h"
32 #include "video.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 
37 typedef struct CoreImageContext {
38  const AVClass *class;
39 
40  int is_video_source; ///< filter is used as video source
41 
42  int w, h; ///< video size
43  AVRational sar; ///< sample aspect ratio
44  AVRational frame_rate; ///< video frame rate
45  AVRational time_base; ///< stream time base
46  int64_t duration; ///< duration expressed in microseconds
47  int64_t pts; ///< increasing presentation time stamp
48  AVFrame *picref; ///< cached reference containing the painted picture
49 
50  CFTypeRef glctx; ///< OpenGL context
51  CGContextRef cgctx; ///< Bitmap context for image copy
52  CFTypeRef input_image; ///< Input image container for passing into Core Image API
53  CGColorSpaceRef color_space; ///< Common color space for input image and cgcontext
54  int bits_per_component; ///< Shared bpc for input-output operation
55 
56  char *filter_string; ///< The complete user provided filter definition
57  CFTypeRef *filters; ///< CIFilter object for all requested filters
58  int num_filters; ///< Amount of filters in *filters
59 
60  char *output_rect; ///< Rectangle within the output image to be filled with the filter output
61  int list_filters; ///< Option used to list all available filters including generators
62  int list_generators; ///< Option used to list all available generators
63 } CoreImageContext;
64 
65 static int config_output(AVFilterLink *link)
66 {
67  CoreImageContext *ctx = link->src->priv;
68 
69  link->w = ctx->w;
70  link->h = ctx->h;
71  link->sample_aspect_ratio = ctx->sar;
72  link->frame_rate = ctx->frame_rate;
73  link->time_base = ctx->time_base;
74 
75  ctx->bits_per_component = av_get_bits_per_pixel(av_pix_fmt_desc_get(link->format)) /
76  av_pix_fmt_desc_get(link->format)->nb_components;
77 
78  return 0;
79 }
80 
81 /** Determine image properties from input link of filter chain.
82  */
83 static int config_input(AVFilterLink *link)
84 {
85  CoreImageContext *ctx = link->dst->priv;
86  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
87  ctx->bits_per_component = av_get_bits_per_pixel(desc) / desc->nb_components;
88 
89  return 0;
90 }
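/*
 * Worked example for the bits-per-component computation above, assuming the
 * packed ARGB format negotiated in query_formats() below: AV_PIX_FMT_ARGB has
 * 32 bits per pixel and 4 components, so bits_per_component = 32 / 4 = 8.
 */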
91 
92 /** Print a list of all available filters including options and respective value ranges and defaults.
93  */
94 static void list_filters(CoreImageContext *ctx)
95 {
96  // querying filters and attributes
97  NSArray *filter_categories = nil;
98 
99  if (ctx->list_generators && !ctx->list_filters) {
100  filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
101  }
102 
103  NSArray *filter_names = [CIFilter filterNamesInCategories:filter_categories];
104  NSEnumerator *filters = [filter_names objectEnumerator];
105 
106  NSString *filter_name;
107  while (filter_name = [filters nextObject]) {
108  av_log(ctx, AV_LOG_INFO, "Filter: %s\n", [filter_name UTF8String]);
109  NSString *input;
110 
111  CIFilter *filter = [CIFilter filterWithName:filter_name];
112  NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
113  NSArray *filter_inputs = [filter inputKeys]; // <nsstring>
114 
115  for (input in filter_inputs) {
116  NSDictionary *input_attribs = [filter_attribs valueForKey:input];
117  NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
118  if ([input_class isEqualToString:@"NSNumber"]) {
119  NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
120  NSNumber *value_min = [input_attribs valueForKey:kCIAttributeSliderMin];
121  NSNumber *value_max = [input_attribs valueForKey:kCIAttributeSliderMax];
122 
123  av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\t[%s %s][%s]\n",
124  [input UTF8String],
125  [input_class UTF8String],
126  [[value_min stringValue] UTF8String],
127  [[value_max stringValue] UTF8String],
128  [[value_default stringValue] UTF8String]);
129  } else {
130  av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\n",
131  [input UTF8String],
132  [input_class UTF8String]);
133  }
134  }
135  }
136 }
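/*
 * The listing above can be triggered from the command line, for example
 * (illustrative; the output target is never written because init() returns
 * AVERROR_EXIT right after printing):
 *
 *   ffmpeg -f lavfi -i coreimagesrc=list_filters=true -f null -
 *
 * With list_generators=true the query is restricted to kCICategoryGenerator.
 */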
137 
138 static int query_formats(AVFilterContext *fctx)
139 {
140  static const enum AVPixelFormat inout_fmts_rgb[] = {
141  AV_PIX_FMT_ARGB,
142  AV_PIX_FMT_NONE
143  };
144 
145  AVFilterFormats *inout_formats;
146  int ret;
147 
148  if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
149  return AVERROR(ENOMEM);
150  }
151 
152  if ((ret = ff_formats_ref(inout_formats, &fctx->inputs[0]->out_formats)) < 0 ||
153  (ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
154  return ret;
155  }
156 
157  return 0;
158 }
159 
160 static int query_formats_src(AVFilterContext *fctx)
161 {
162  static const enum AVPixelFormat inout_fmts_rgb[] = {
163  AV_PIX_FMT_ARGB,
164  AV_PIX_FMT_NONE
165  };
166 
167  AVFilterFormats *inout_formats;
168  int ret;
169 
170  if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
171  return AVERROR(ENOMEM);
172  }
173 
174  if ((ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
175  return ret;
176  }
177 
178  return 0;
179 }
180 
181 static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
182 {
183  int i;
184 
185  // (re-)initialize input image
186  const CGSize frame_size = {
187  frame->width,
188  frame->height
189  };
190 
191  NSData *data = [NSData dataWithBytesNoCopy:frame->data[0]
192  length:frame->height*frame->linesize[0]
193  freeWhenDone:NO];
194 
195  CIImage *ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
196  bytesPerRow:frame->linesize[0]
197  size:frame_size
198  format:kCIFormatARGB8
199  colorSpace:ctx->color_space]; //kCGColorSpaceGenericRGB
200  if (!ret) {
201  av_log(ctx, AV_LOG_ERROR, "Input image could not be initialized.\n");
202  return AVERROR_EXTERNAL;
203  }
204 
205  CIFilter *filter = NULL;
206  CIImage *filter_input = (__bridge CIImage*)ctx->input_image;
207  CIImage *filter_output = NULL;
208 
209  // successively apply all filters
210  for (i = 0; i < ctx->num_filters; i++) {
211  if (i) {
212  // set filter input to previous filter output
213  filter_input = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
214  CGRect out_rect = [filter_input extent];
215  if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
216  // do not keep padded image regions after filtering
217  out_rect.origin.x = 0.0f;
218  out_rect.origin.y = 0.0f;
219  out_rect.size.width = frame->width;
220  out_rect.size.height = frame->height;
221  }
222  filter_input = [filter_input imageByCroppingToRect:out_rect];
223  }
224 
225  filter = (__bridge CIFilter*)ctx->filters[i];
226 
227  // do not set input image for the first filter if used as video source
228  if (!ctx->is_video_source || i) {
229  @try {
230  [filter setValue:filter_input forKey:kCIInputImageKey];
231  } @catch (NSException *exception) {
232  if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
233  av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.\n", [exception.reason UTF8String]);
234  return AVERROR_EXTERNAL;
235  } else {
236  av_log(ctx, AV_LOG_WARNING, "Selected filter does not accept an input image.\n");
237  }
238  }
239  }
240  }
241 
242  // get output of last filter
243  filter_output = [filter valueForKey:kCIOutputImageKey];
244 
245  if (!filter_output) {
246  av_log(ctx, AV_LOG_ERROR, "Filter output not available.\n");
247  return AVERROR_EXTERNAL;
248  }
249 
250  // do not keep padded image regions after filtering
251  CGRect out_rect = [filter_output extent];
252  if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
253  av_log(ctx, AV_LOG_DEBUG, "Cropping output image.\n");
254  out_rect.origin.x = 0.0f;
255  out_rect.origin.y = 0.0f;
256  out_rect.size.width = frame->width;
257  out_rect.size.height = frame->height;
258  }
259 
260  CGImageRef out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
261  fromRect:out_rect];
262 
263  if (!out) {
264  av_log(ctx, AV_LOG_ERROR, "Cannot create valid output image.\n");
265  }
266 
267  // create bitmap context on the fly for rendering into current frame->data[]
268  if (ctx->cgctx) {
269  CGContextRelease(ctx->cgctx);
270  ctx->cgctx = NULL;
271  }
272  size_t out_width = CGImageGetWidth(out);
273  size_t out_height = CGImageGetHeight(out);
274 
275  if (out_width > frame->width || out_height > frame->height) { // this might result in segfault
276  av_log(ctx, AV_LOG_WARNING, "Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
277  out_width, out_height, frame->width, frame->height);
278  }
279  ctx->cgctx = CGBitmapContextCreate(frame->data[0],
280  frame->width,
281  frame->height,
282  ctx->bits_per_component,
283  frame->linesize[0],
284  ctx->color_space,
285  (uint32_t)kCGImageAlphaPremultipliedFirst); // ARGB
286  if (!ctx->cgctx) {
287  av_log(ctx, AV_LOG_ERROR, "CGBitmap context cannot be created.\n");
288  return AVERROR_EXTERNAL;
289  }
290 
291  // copy ("draw") the output image into the frame data
292  CGRect rect = {{0,0},{frame->width, frame->height}};
293  if (ctx->output_rect) {
294  @try {
295  NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
296  NSRect tmp = NSRectFromString(tmp_string);
297  rect = NSRectToCGRect(tmp);
298  } @catch (NSException *exception) {
299  av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.\n", [exception.reason UTF8String]);
300  return AVERROR_EXTERNAL;
301  }
302  if (rect.size.width == 0.0f) {
303  av_log(ctx, AV_LOG_WARNING, "Width of output rect is zero.\n");
304  }
305  if (rect.size.height == 0.0f) {
306  av_log(ctx, AV_LOG_WARNING, "Height of output rect is zero.\n");
307  }
308  }
309 
310  CGContextDrawImage(ctx->cgctx, rect, out);
311 
312  return ff_filter_frame(link, frame);
313 }
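/*
 * Note on the copy path above: frame->data[0] backs both the input CIImage
 * (via dataWithBytesNoCopy) and the CGBitmapContext target, so
 * CGContextDrawImage() writes the filtered result back into the same AVFrame
 * that is then passed on with ff_filter_frame().
 */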
314 
315 /** Apply all valid filters successively to the input image.
316  * The final output image is copied from the GPU by "drawing" using a bitmap context.
317  */
318 static int filter_frame(AVFilterLink *link, AVFrame *frame)
319 {
320  return apply_filter(link->dst->priv, link->dst->outputs[0], frame);
321 }
322 
323 static int request_frame(AVFilterLink *link)
324 {
325  CoreImageContext *ctx = link->src->priv;
326  AVFrame *frame;
327 
328  if (ctx->duration >= 0 &&
329  av_rescale_q(ctx->pts, ctx->time_base, AV_TIME_BASE_Q) >= ctx->duration) {
330  return AVERROR_EOF;
331  }
332 
333  if (!ctx->picref) {
334  ctx->picref = ff_get_video_buffer(link, ctx->w, ctx->h);
335  if (!ctx->picref) {
336  return AVERROR(ENOMEM);
337  }
338  }
339 
340  frame = av_frame_clone(ctx->picref);
341  if (!frame) {
342  return AVERROR(ENOMEM);
343  }
344 
345  frame->pts = ctx->pts;
346  frame->key_frame = 1;
347  frame->interlaced_frame = 0;
348  frame->pict_type = AV_PICTURE_TYPE_I;
349  frame->sample_aspect_ratio = ctx->sar;
350 
351  ctx->pts++;
352 
353  return apply_filter(ctx, link, frame);
354 }
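/*
 * Worked example for the EOF check above (values are illustrative): with r=25
 * the source time base is 1/25, so pts=250 rescaled to AV_TIME_BASE_Q equals
 * 10000000 microseconds; with d=10 (ten seconds, stored as 10000000) the
 * source therefore emits 250 frames before returning AVERROR_EOF.
 */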
355 
356 /** Set an option of the given filter to the provided key-value pair.
357  */
358 static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
359 {
360  NSString *input_key = [NSString stringWithUTF8String:key];
361  NSString *input_val = [NSString stringWithUTF8String:value];
362 
363  NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
364  NSDictionary *input_attribs = [filter_attribs valueForKey:input_key];
365 
366  NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
367  NSString *input_type = [input_attribs valueForKey:kCIAttributeType];
368 
369  if (!input_attribs) {
370  av_log(ctx, AV_LOG_WARNING, "Skipping unknown option: \"%s\".\n",
371  [input_key UTF8String]); // [[filter name] UTF8String]) not currently defined...
372  return;
373  }
374 
375  av_log(ctx, AV_LOG_DEBUG, "key: %s, val: %s, #attribs: %lu, class: %s, type: %s\n",
376  [input_key UTF8String],
377  [input_val UTF8String],
378  input_attribs ? (unsigned long)[input_attribs count] : -1,
379  [input_class UTF8String],
380  [input_type UTF8String]);
381 
382  if ([input_class isEqualToString:@"NSNumber"]) {
383  float input = input_val.floatValue;
384  NSNumber *max_value = [input_attribs valueForKey:kCIAttributeSliderMax];
385  NSNumber *min_value = [input_attribs valueForKey:kCIAttributeSliderMin];
386  NSNumber *used_value = nil;
387 
388 #define CLAMP_WARNING do { \
389 av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n", \
390  input, \
391  [input_key UTF8String], \
392  min_value.floatValue, \
393  max_value.floatValue, \
394  used_value.floatValue); \
395 } while(0)
396  if (input > max_value.floatValue) {
397  used_value = max_value;
398  CLAMP_WARNING;
399  } else if (input < min_value.floatValue) {
400  used_value = min_value;
401  CLAMP_WARNING;
402  } else {
403  used_value = [NSNumber numberWithFloat:input];
404  }
405 
406  [filter setValue:used_value forKey:input_key];
407  } else if ([input_class isEqualToString:@"CIVector"]) {
408  CIVector *input = [CIVector vectorWithString:input_val];
409 
410  if (!input) {
411  av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIVector description: \"%s\".\n",
412  [input_val UTF8String]);
413  return;
414  }
415 
416  [filter setValue:input forKey:input_key];
417  } else if ([input_class isEqualToString:@"CIColor"]) {
418  CIColor *input = [CIColor colorWithString:input_val];
419 
420  if (!input) {
421  av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIColor description: \"%s\".\n",
422  [input_val UTF8String]);
423  return;
424  }
425 
426  [filter setValue:input forKey:input_key];
427  } else if ([input_class isEqualToString:@"NSString"]) { // set value directly as NSString
428  [filter setValue:input_val forKey:input_key];
429  } else if ([input_class isEqualToString:@"NSData"]) { // convert the option string to NSData using Latin-1 encoding
430  NSData *input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
431  length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];
432 
433  if (!input) {
434  av_log(ctx, AV_LOG_WARNING, "Skipping invalid NSData description: \"%s\".\n",
435  [input_val UTF8String]);
436  return;
437  }
438 
439  [filter setValue:input forKey:input_key];
440  } else {
441  av_log(ctx, AV_LOG_WARNING, "Skipping unsupported option class: \"%s\".\n",
442  [input_class UTF8String]);
443  avpriv_report_missing_feature(ctx, "Handling of some option classes");
444  return;
445  }
446 }
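/*
 * Illustrative option values for the classes handled above (key names are
 * Core Image built-ins chosen as examples; CIVector and CIColor values use the
 * formats accepted by vectorWithString: and colorWithString:, so spaces and
 * brackets may need escaping inside a filtergraph):
 *   NSNumber: inputRadius=10
 *   CIVector: inputCenter=[150 150]
 *   CIColor:  inputColor=0.5 0.3 0.1 1.0
 */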
447 
448 /** Create a filter object by a given name and set all options to defaults.
449  * Overwrite any option given by the user to the provided value in filter_options.
450  */
451 static CIFilter* create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
452 {
453  // create filter object
454  CIFilter *filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];
455 
456  // set default options
457  [filter setDefaults];
458 
459  // set user options
460  if (filter_options) {
461  AVDictionaryEntry *o = NULL;
462  while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
463  set_option(ctx, filter, o->key, o->value);
464  }
465  }
466 
467  return filter;
468 }
469 
470 static av_cold int init(AVFilterContext *fctx)
471 {
472  CoreImageContext *ctx = fctx->priv;
473  AVDictionary *filter_dict = NULL;
474  AVDictionaryEntry *f = NULL;
475  AVDictionaryEntry *o = NULL;
476  int ret;
477  int i;
478 
479  if (ctx->list_filters || ctx->list_generators) {
480  list_filters(ctx);
481  return AVERROR_EXIT;
482  }
483 
484  if (ctx->filter_string) {
485  // parse filter string (filter=name@opt=val@opt2=val2#name2@opt3=val3) for filters separated by #
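 // e.g. "CIBoxBlur@default#CIVignetteEffect@inputIntensity=0.5" yields two entries:
 // CIBoxBlur -> "default" and CIVignetteEffect -> "inputIntensity=0.5"
 // (filter names are Core Image built-ins, shown only for illustration)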
486  av_log(ctx, AV_LOG_DEBUG, "Filter_string: %s\n", ctx->filter_string);
487  ret = av_dict_parse_string(&filter_dict, ctx->filter_string, "@", "#", AV_DICT_MULTIKEY); // parse filter_name:all_filter_options
488  if (ret) {
489  av_log(ctx, AV_LOG_ERROR, "Parsing of filters failed.\n");
490  return AVERROR(EIO);
491  }
492  ctx->num_filters = av_dict_count(filter_dict);
493  av_log(ctx, AV_LOG_DEBUG, "Filter count: %i\n", ctx->num_filters);
494 
495  // allocate CIFilter array
496  ctx->filters = av_mallocz_array(ctx->num_filters, sizeof(CIFilter*));
497  if (!ctx->filters) {
498  av_log(ctx, AV_LOG_ERROR, "Could not allocate filter array.\n");
499  return AVERROR(ENOMEM);
500  }
501 
502  // parse filters for option key-value pairs (opt=val@opt2=val2) separated by @
503  i = 0;
504  while ((f = av_dict_get(filter_dict, "", f, AV_DICT_IGNORE_SUFFIX))) {
505  AVDictionary *filter_options = NULL;
506 
507  if (strncmp(f->value, "default", 7)) { // not default
508  ret = av_dict_parse_string(&filter_options, f->value, "=", "@", 0); // parse option_name:option_value
509  if (ret) {
510  av_log(ctx, AV_LOG_ERROR, "Parsing of filter options for \"%s\" failed.\n", f->key);
511  return AVERROR(EIO);
512  }
513  }
514 
515  if (av_log_get_level() >= AV_LOG_DEBUG) {
516  av_log(ctx, AV_LOG_DEBUG, "Creating filter %i: \"%s\":\n", i, f->key);
517  if (!filter_options) {
518  av_log(ctx, AV_LOG_DEBUG, "\tusing default options\n");
519  } else {
520  while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
521  av_log(ctx, AV_LOG_DEBUG, "\t%s: %s\n", o->key, o->value);
522  }
523  }
524  }
525 
526  ctx->filters[i] = CFBridgingRetain(create_filter(ctx, f->key, filter_options));
527  if (!ctx->filters[i]) {
528  av_log(ctx, AV_LOG_ERROR, "Could not create filter \"%s\".\n", f->key);
529  return AVERROR(EINVAL);
530  }
531 
532  i++;
533  }
534  } else {
535  av_log(ctx, AV_LOG_ERROR, "No filters specified.\n");
536  return AVERROR(EINVAL);
537  }
538 
539  // create GPU context on OSX
540  const NSOpenGLPixelFormatAttribute attr[] = {
541  NSOpenGLPFAAccelerated,
542  NSOpenGLPFANoRecovery,
543  NSOpenGLPFAColorSize, 32,
544  0
545  };
546 
547  NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];
548  ctx->color_space = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
549  ctx->glctx = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
550  pixelFormat:[pixel_format CGLPixelFormatObj]
551  colorSpace:ctx->color_space
552  options:nil]);
553 
554  if (!ctx->glctx) {
555  av_log(ctx, AV_LOG_ERROR, "CIContext not created.\n");
556  return AVERROR_EXTERNAL;
557  }
558 
559  // Creating an empty input image as input container for the context
560  ctx->input_image = CFBridgingRetain([CIImage emptyImage]);
561 
562  return 0;
563 }
564 
565 static av_cold int init_src(AVFilterContext *fctx)
566 {
567  CoreImageContext *ctx = fctx->priv;
568 
569  ctx->is_video_source = 1;
570  ctx->time_base = av_inv_q(ctx->frame_rate);
571  ctx->pts = 0;
572 
573  return init(fctx);
574 }
575 
576 static av_cold void uninit(AVFilterContext *fctx)
577 {
578 #define SafeCFRelease(ptr) do { \
579  if (ptr) { \
580  CFRelease(ptr); \
581  ptr = NULL; \
582  } \
583 } while (0)
584 
585  CoreImageContext *ctx = fctx->priv;
586 
587  SafeCFRelease(ctx->glctx);
588  SafeCFRelease(ctx->cgctx);
589  SafeCFRelease(ctx->input_image);
590  SafeCFRelease(ctx->color_space);
591 
592  if (ctx->filters) {
593  for (int i = 0; i < ctx->num_filters; i++) {
594  SafeCFRelease(ctx->filters[i]);
595  }
596  av_freep(&ctx->filters);
597  }
598 
599  av_frame_free(&ctx->picref);
600 }
601 
602 static const AVFilterPad vf_coreimage_inputs[] = {
603  {
604  .name = "default",
605  .type = AVMEDIA_TYPE_VIDEO,
606  .filter_frame = filter_frame,
607  .config_props = config_input,
608  },
609  { NULL }
610 };
611 
612 static const AVFilterPad vf_coreimage_outputs[] = {
613  {
614  .name = "default",
615  .type = AVMEDIA_TYPE_VIDEO,
616  },
617  { NULL }
618 };
619 
620 static const AVFilterPad vsrc_coreimagesrc_outputs[] = {
621  {
622  .name = "default",
623  .type = AVMEDIA_TYPE_VIDEO,
624  .request_frame = request_frame,
625  .config_props = config_output,
626  },
627  { NULL }
628 };
629 
630 #define OFFSET(x) offsetof(CoreImageContext, x)
631 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
632 
633 #define GENERATOR_OPTIONS \
634  {"size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
635  {"s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0, FLAGS}, \
636  {"rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
637  {"r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS}, \
638  {"duration", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
639  {"d", "set video duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS}, \
640  {"sar", "set video sample aspect ratio", OFFSET(sar), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, INT_MAX, FLAGS},
641 
642 #define FILTER_OPTIONS \
643  {"list_filters", "list available filters", OFFSET(list_filters), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
644  {"list_generators", "list available generators", OFFSET(list_generators), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, .flags = FLAGS}, \
645  {"filter", "names and options of filters to apply", OFFSET(filter_string), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS}, \
646  {"output_rect", "output rectangle within output image", OFFSET(output_rect), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS},
647 
648 
649 // definitions for coreimage video filter
650 static const AVOption coreimage_options[] = {
651  FILTER_OPTIONS
652  { NULL }
653 };
654 
655 AVFILTER_DEFINE_CLASS(coreimage);
656 
657 AVFilter ff_vf_coreimage = {
658  .name = "coreimage",
659  .description = NULL_IF_CONFIG_SMALL("Video filtering using CoreImage API."),
660  .init = init,
661  .uninit = uninit,
662  .priv_size = sizeof(CoreImageContext),
663  .priv_class = &coreimage_class,
664  .inputs = vf_coreimage_inputs,
665  .outputs = vf_coreimage_outputs,
666  .query_formats = query_formats,
667 };
668 
669 // definitions for coreimagesrc video source
670 static const AVOption coreimagesrc_options[] = {
671  GENERATOR_OPTIONS
672  FILTER_OPTIONS
673  { NULL }
674 };
675 
676 AVFILTER_DEFINE_CLASS(coreimagesrc);
677 
678 AVFilter ff_vsrc_coreimagesrc = {
679  .name = "coreimagesrc",
680  .description = NULL_IF_CONFIG_SMALL("Video source using image generators of CoreImage API."),
681  .init = init_src,
682  .uninit = uninit,
683  .priv_size = sizeof(CoreImageContext),
684  .priv_class = &coreimagesrc_class,
685  .inputs = NULL,
686  .outputs = vsrc_coreimagesrc_outputs,
687  .query_formats = query_formats_src,
688 };