
/libavfilter/vf_coreimage.m

https://gitlab.com/sjchen1981/FFmpeg
/*
 * Copyright (c) 2016 Thilo Borgmann
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Video processing based on Apple's CoreImage API
 */
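
/*
 * Example invocations (illustrative; available filter names depend on the
 * Core Image filters installed on the system, and shell quoting may vary):
 *
 *   ffmpeg -i INPUT -vf coreimage=list_filters=true -f null -
 *   ffmpeg -i INPUT -vf coreimage=filter=CIBoxBlur@default OUTPUT
 */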
#import <QuartzCore/CoreImage.h>
#import <AppKit/AppKit.h>

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

typedef struct CoreImageContext {
    const AVClass   *class;

    int             is_video_source;    ///< filter is used as video source

    int             w, h;               ///< video size
    AVRational      sar;                ///< sample aspect ratio
    AVRational      frame_rate;         ///< video frame rate
    AVRational      time_base;          ///< stream time base
    int64_t         duration;           ///< duration expressed in microseconds
    int64_t         pts;                ///< increasing presentation time stamp
    AVFrame         *picref;            ///< cached reference containing the painted picture

    CFTypeRef       glctx;              ///< OpenGL context
    CGContextRef    cgctx;              ///< Bitmap context for image copy
    CFTypeRef       input_image;        ///< Input image container for passing into Core Image API
    CGColorSpaceRef color_space;        ///< Common color space for input image and cgcontext
    int             bits_per_component; ///< Shared bpc for input-output operation

    char            *filter_string;     ///< The complete user provided filter definition
    CFTypeRef       *filters;           ///< CIFilter object for all requested filters
    int             num_filters;        ///< Number of filters in *filters

    char            *output_rect;       ///< Rectangle to be filled with filter input
    int             list_filters;       ///< Option used to list all available filters including generators
    int             list_generators;    ///< Option used to list all available generators
} CoreImageContext;
static int config_output(AVFilterLink *link)
{
    CoreImageContext *ctx = link->src->priv;

    link->w                   = ctx->w;
    link->h                   = ctx->h;
    link->sample_aspect_ratio = ctx->sar;
    link->frame_rate          = ctx->frame_rate;
    link->time_base           = ctx->time_base;

    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
    ctx->bits_per_component        = av_get_bits_per_pixel(desc) / desc->nb_components;

    return 0;
}
/** Determine image properties from input link of filter chain.
 */
static int config_input(AVFilterLink *link)
{
    CoreImageContext *ctx          = link->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);

    ctx->bits_per_component = av_get_bits_per_pixel(desc) / desc->nb_components;

    return 0;
}
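
/* Note: with the only pixel format negotiated below (AV_PIX_FMT_ARGB),
 * av_get_bits_per_pixel() returns 32 and nb_components is 4, so
 * bits_per_component resolves to 8 in both config functions above. */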
/** Print a list of all available filters including options and respective value ranges and defaults.
 */
static void list_filters(CoreImageContext *ctx)
{
    // querying filters and attributes
    NSArray *filter_categories = nil;

    if (ctx->list_generators && !ctx->list_filters) {
        filter_categories = [NSArray arrayWithObjects:kCICategoryGenerator, nil];
    }

    NSArray *filter_names = [CIFilter filterNamesInCategories:filter_categories];
    NSEnumerator *filters = [filter_names objectEnumerator];

    NSString *filter_name;
    while ((filter_name = [filters nextObject])) {
        av_log(ctx, AV_LOG_INFO, "Filter: %s\n", [filter_name UTF8String]);
        NSString *input;

        CIFilter *filter             = [CIFilter filterWithName:filter_name];
        NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
        NSArray *filter_inputs       = [filter inputKeys];  // <nsstring>

        for (input in filter_inputs) {
            NSDictionary *input_attribs = [filter_attribs valueForKey:input];
            NSString *input_class       = [input_attribs valueForKey:kCIAttributeClass];
            if ([input_class isEqualToString:@"NSNumber"]) {
                NSNumber *value_default = [input_attribs valueForKey:kCIAttributeDefault];
                NSNumber *value_min     = [input_attribs valueForKey:kCIAttributeSliderMin];
                NSNumber *value_max     = [input_attribs valueForKey:kCIAttributeSliderMax];

                av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\t[%s %s][%s]\n",
                       [input UTF8String],
                       [input_class UTF8String],
                       [[value_min stringValue] UTF8String],
                       [[value_max stringValue] UTF8String],
                       [[value_default stringValue] UTF8String]);
            } else {
                av_log(ctx, AV_LOG_INFO, "\tOption: %s\t[%s]\n",
                       [input UTF8String],
                       [input_class UTF8String]);
            }
        }
    }
}
static int query_formats(AVFilterContext *fctx)
{
    static const enum AVPixelFormat inout_fmts_rgb[] = {
        AV_PIX_FMT_ARGB,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *inout_formats;
    int ret;

    if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
        return AVERROR(ENOMEM);
    }

    if ((ret = ff_formats_ref(inout_formats, &fctx->inputs[0]->out_formats)) < 0 ||
        (ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
        return ret;
    }

    return 0;
}
static int query_formats_src(AVFilterContext *fctx)
{
    static const enum AVPixelFormat inout_fmts_rgb[] = {
        AV_PIX_FMT_ARGB,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *inout_formats;
    int ret;

    if (!(inout_formats = ff_make_format_list(inout_fmts_rgb))) {
        return AVERROR(ENOMEM);
    }

    if ((ret = ff_formats_ref(inout_formats, &fctx->outputs[0]->in_formats)) < 0) {
        return ret;
    }

    return 0;
}
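
/* Only AV_PIX_FMT_ARGB is offered on the links: its byte order matches the
 * kCIFormatARGB8 input format and the alpha-first bitmap context used in
 * apply_filter() below. */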
static int apply_filter(CoreImageContext *ctx, AVFilterLink *link, AVFrame *frame)
{
    int i;

    // (re-)initialize input image
    const CGSize frame_size = {
        frame->width,
        frame->height
    };

    NSData *data = [NSData dataWithBytesNoCopy:frame->data[0]
                                        length:frame->height*frame->linesize[0]
                                  freeWhenDone:NO];

    CIImage *ret = [(__bridge CIImage*)ctx->input_image initWithBitmapData:data
                                                               bytesPerRow:frame->linesize[0]
                                                                      size:frame_size
                                                                    format:kCIFormatARGB8
                                                                colorSpace:ctx->color_space]; //kCGColorSpaceGenericRGB
    if (!ret) {
        av_log(ctx, AV_LOG_ERROR, "Input image could not be initialized.\n");
        return AVERROR_EXTERNAL;
    }
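
    /* The frame's pixel buffer is wrapped without copying (freeWhenDone:NO),
     * so the CIImage is only valid while the frame's data remains alive. */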
    CIFilter *filter       = NULL;
    CIImage *filter_input  = (__bridge CIImage*)ctx->input_image;
    CIImage *filter_output = NULL;

    // successively apply all filters
    for (i = 0; i < ctx->num_filters; i++) {
        if (i) {
            // set filter input to previous filter output
            filter_input    = [(__bridge CIImage*)ctx->filters[i-1] valueForKey:kCIOutputImageKey];
            CGRect out_rect = [filter_input extent];
            if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
                // do not keep padded image regions after filtering
                out_rect.origin.x    = 0.0f;
                out_rect.origin.y    = 0.0f;
                out_rect.size.width  = frame->width;
                out_rect.size.height = frame->height;
            }
            filter_input = [filter_input imageByCroppingToRect:out_rect];
        }

        filter = (__bridge CIFilter*)ctx->filters[i];

        // do not set input image for the first filter if used as video source
        if (!ctx->is_video_source || i) {
            @try {
                [filter setValue:filter_input forKey:kCIInputImageKey];
            } @catch (NSException *exception) {
                if (![[exception name] isEqualToString:NSUndefinedKeyException]) {
                    av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.\n", [exception.reason UTF8String]);
                    return AVERROR_EXTERNAL;
                } else {
                    av_log(ctx, AV_LOG_WARNING, "Selected filter does not accept an input image.\n");
                }
            }
        }
    }

    // get output of last filter
    filter_output = [filter valueForKey:kCIOutputImageKey];

    if (!filter_output) {
        av_log(ctx, AV_LOG_ERROR, "Filter output not available.\n");
        return AVERROR_EXTERNAL;
    }

    // do not keep padded image regions after filtering
    CGRect out_rect = [filter_output extent];
    if (out_rect.size.width > frame->width || out_rect.size.height > frame->height) {
        av_log(ctx, AV_LOG_DEBUG, "Cropping output image.\n");
        out_rect.origin.x    = 0.0f;
        out_rect.origin.y    = 0.0f;
        out_rect.size.width  = frame->width;
        out_rect.size.height = frame->height;
    }

    CGImageRef out = [(__bridge CIContext*)ctx->glctx createCGImage:filter_output
                                                           fromRect:out_rect];
    if (!out) {
        av_log(ctx, AV_LOG_ERROR, "Cannot create valid output image.\n");
        return AVERROR_EXTERNAL;
    }

    // create bitmap context on the fly for rendering into current frame->data[]
    if (ctx->cgctx) {
        CGContextRelease(ctx->cgctx);
        ctx->cgctx = NULL;
    }
    size_t out_width  = CGImageGetWidth(out);
    size_t out_height = CGImageGetHeight(out);

    if (out_width > frame->width || out_height > frame->height) { // this might result in segfault
        av_log(ctx, AV_LOG_WARNING, "Output image has unexpected size: %lux%lu (expected: %ix%i). This may crash...\n",
               out_width, out_height, frame->width, frame->height);
    }
    ctx->cgctx = CGBitmapContextCreate(frame->data[0],
                                       frame->width,
                                       frame->height,
                                       ctx->bits_per_component,
                                       frame->linesize[0],
                                       ctx->color_space,
                                       (uint32_t)kCGImageAlphaPremultipliedFirst); // ARGB
    if (!ctx->cgctx) {
        av_log(ctx, AV_LOG_ERROR, "CGBitmap context cannot be created.\n");
        return AVERROR_EXTERNAL;
    }
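
    /* The bitmap context above is backed directly by frame->data[0]; drawing
     * the rendered CGImage into it below writes the filtered pixels straight
     * into the output frame without an extra copy. */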
    // copy ("draw") the output image into the frame data
    CGRect rect = {{0,0},{frame->width, frame->height}};
    if (ctx->output_rect) {
        @try {
            NSString *tmp_string = [NSString stringWithUTF8String:ctx->output_rect];
            NSRect tmp           = NSRectFromString(tmp_string);
            rect                 = NSRectToCGRect(tmp);
        } @catch (NSException *exception) {
            av_log(ctx, AV_LOG_ERROR, "An error occurred: %s.\n", [exception.reason UTF8String]);
            return AVERROR_EXTERNAL;
        }
        if (rect.size.width == 0.0f) {
            av_log(ctx, AV_LOG_WARNING, "Width of output rect is zero.\n");
        }
        if (rect.size.height == 0.0f) {
            av_log(ctx, AV_LOG_WARNING, "Height of output rect is zero.\n");
        }
    }
    CGContextDrawImage(ctx->cgctx, rect, out);
    CGImageRelease(out); // createCGImage follows the "Create" rule, so release it here

    return ff_filter_frame(link, frame);
}
/** Apply all valid filters successively to the input image.
 * The final output image is copied from the GPU by "drawing" using a bitmap context.
 */
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
    return apply_filter(link->dst->priv, link->dst->outputs[0], frame);
}
static int request_frame(AVFilterLink *link)
{
    CoreImageContext *ctx = link->src->priv;
    AVFrame *frame;

    if (ctx->duration >= 0 &&
        av_rescale_q(ctx->pts, ctx->time_base, AV_TIME_BASE_Q) >= ctx->duration) {
        return AVERROR_EOF;
    }

    if (!ctx->picref) {
        ctx->picref = ff_get_video_buffer(link, ctx->w, ctx->h);
        if (!ctx->picref) {
            return AVERROR(ENOMEM);
        }
    }

    frame = av_frame_clone(ctx->picref);
    if (!frame) {
        return AVERROR(ENOMEM);
    }

    frame->pts                 = ctx->pts;
    frame->key_frame           = 1;
    frame->interlaced_frame    = 0;
    frame->pict_type           = AV_PICTURE_TYPE_I;
    frame->sample_aspect_ratio = ctx->sar;

    ctx->pts++;

    return apply_filter(ctx, link, frame);
}
/** Set an option of the given filter to the provided key-value pair.
 */
static void set_option(CoreImageContext *ctx, CIFilter *filter, const char *key, const char *value)
{
    NSString *input_key = [NSString stringWithUTF8String:key];
    NSString *input_val = [NSString stringWithUTF8String:value];

    NSDictionary *filter_attribs = [filter attributes]; // <nsstring, id>
    NSDictionary *input_attribs  = [filter_attribs valueForKey:input_key];

    NSString *input_class = [input_attribs valueForKey:kCIAttributeClass];
    NSString *input_type  = [input_attribs valueForKey:kCIAttributeType];

    if (!input_attribs) {
        av_log(ctx, AV_LOG_WARNING, "Skipping unknown option: \"%s\".\n",
               [input_key UTF8String]); // [[filter name] UTF8String]) not currently defined...
        return;
    }

    av_log(ctx, AV_LOG_DEBUG, "key: %s, val: %s, #attribs: %lu, class: %s, type: %s\n",
           [input_key UTF8String],
           [input_val UTF8String],
           input_attribs ? (unsigned long)[input_attribs count] : -1,
           [input_class UTF8String],
           [input_type UTF8String]);

    if ([input_class isEqualToString:@"NSNumber"]) {
        float input          = input_val.floatValue;
        NSNumber *max_value  = [input_attribs valueForKey:kCIAttributeSliderMax];
        NSNumber *min_value  = [input_attribs valueForKey:kCIAttributeSliderMin];
        NSNumber *used_value = nil;

#define CLAMP_WARNING do {                                                                                                \
    av_log(ctx, AV_LOG_WARNING, "Value of \"%f\" for option \"%s\" is out of range [%f %f], clamping to \"%f\".\n",      \
           input,                                                                                                         \
           [input_key UTF8String],                                                                                        \
           min_value.floatValue,                                                                                          \
           max_value.floatValue,                                                                                          \
           used_value.floatValue);                                                                                        \
} while(0)
        if (input > max_value.floatValue) {
            used_value = max_value;
            CLAMP_WARNING;
        } else if (input < min_value.floatValue) {
            used_value = min_value;
            CLAMP_WARNING;
        } else {
            used_value = [NSNumber numberWithFloat:input];
        }
        [filter setValue:used_value forKey:input_key];
    } else if ([input_class isEqualToString:@"CIVector"]) {
        CIVector *input = [CIVector vectorWithString:input_val];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIVector description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else if ([input_class isEqualToString:@"CIColor"]) {
        CIColor *input = [CIColor colorWithString:input_val];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid CIColor description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else if ([input_class isEqualToString:@"NSString"]) { // set value as string
        [filter setValue:input_val forKey:input_key];
    } else if ([input_class isEqualToString:@"NSData"]) { // set value as raw data, using latin1 encoding
        NSData *input = [NSData dataWithBytes:(const void*)[input_val cStringUsingEncoding:NSISOLatin1StringEncoding]
                                       length:[input_val lengthOfBytesUsingEncoding:NSISOLatin1StringEncoding]];

        if (!input) {
            av_log(ctx, AV_LOG_WARNING, "Skipping invalid NSData description: \"%s\".\n",
                   [input_val UTF8String]);
            return;
        }

        [filter setValue:input forKey:input_key];
    } else {
        av_log(ctx, AV_LOG_WARNING, "Skipping unsupported option class: \"%s\".\n",
               [input_class UTF8String]);
        avpriv_report_missing_feature(ctx, "Handling of some option classes");
        return;
    }
}
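
/* Illustrative example: for an NSNumber option with slider range [0 100],
 * "inputRadius=50.5" is stored as-is, while "inputRadius=200" is clamped
 * to 100 with a warning. */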
/** Create a filter object by a given name and set all options to defaults.
 * Overwrite any option given by the user to the provided value in filter_options.
 */
static CIFilter* create_filter(CoreImageContext *ctx, const char *filter_name, AVDictionary *filter_options)
{
    // create filter object
    CIFilter *filter = [CIFilter filterWithName:[NSString stringWithUTF8String:filter_name]];

    // set default options
    [filter setDefaults];

    // set user options
    if (filter_options) {
        AVDictionaryEntry *o = NULL;
        while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
            set_option(ctx, filter, o->key, o->value);
        }
    }

    return filter;
}
static av_cold int init(AVFilterContext *fctx)
{
    CoreImageContext *ctx     = fctx->priv;
    AVDictionary *filter_dict = NULL;
    AVDictionaryEntry *f      = NULL;
    AVDictionaryEntry *o      = NULL;
    int ret;
    int i;

    if (ctx->list_filters || ctx->list_generators) {
        list_filters(ctx);
        return AVERROR_EXIT;
    }

    if (ctx->filter_string) {
        // parse filter string (filter=name@opt=val@opt2=val2#name2@opt3=val3) for filters separated by #
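        // e.g. "CIBoxBlur@default#CIVignetteEffect@inputCenter=100 100@inputRadius=50"
        // (illustrative: a default box blur followed by a vignette)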
        av_log(ctx, AV_LOG_DEBUG, "Filter_string: %s\n", ctx->filter_string);
        ret = av_dict_parse_string(&filter_dict, ctx->filter_string, "@", "#", AV_DICT_MULTIKEY); // parse filter_name:all_filter_options
        if (ret) {
            av_log(ctx, AV_LOG_ERROR, "Parsing of filters failed.\n");
            return AVERROR(EIO);
        }
        ctx->num_filters = av_dict_count(filter_dict);
        av_log(ctx, AV_LOG_DEBUG, "Filter count: %i\n", ctx->num_filters);

        // allocate CIFilter array
        ctx->filters = av_mallocz_array(ctx->num_filters, sizeof(CIFilter*));
        if (!ctx->filters) {
            av_log(ctx, AV_LOG_ERROR, "Could not allocate filter array.\n");
            return AVERROR(ENOMEM);
        }

        // parse filters for option key-value pairs (opt=val@opt2=val2) separated by @
        i = 0;
        while ((f = av_dict_get(filter_dict, "", f, AV_DICT_IGNORE_SUFFIX))) {
            AVDictionary *filter_options = NULL;

            if (strncmp(f->value, "default", 7)) { // not default
                ret = av_dict_parse_string(&filter_options, f->value, "=", "@", 0); // parse option_name:option_value
                if (ret) {
                    av_log(ctx, AV_LOG_ERROR, "Parsing of filter options for \"%s\" failed.\n", f->key);
                    return AVERROR(EIO);
                }
            }

            if (av_log_get_level() >= AV_LOG_DEBUG) {
                av_log(ctx, AV_LOG_DEBUG, "Creating filter %i: \"%s\":\n", i, f->key);
                if (!filter_options) {
                    av_log(ctx, AV_LOG_DEBUG, "\tusing default options\n");
                } else {
                    while ((o = av_dict_get(filter_options, "", o, AV_DICT_IGNORE_SUFFIX))) {
                        av_log(ctx, AV_LOG_DEBUG, "\t%s: %s\n", o->key, o->value);
                    }
                }
            }

            ctx->filters[i] = CFBridgingRetain(create_filter(ctx, f->key, filter_options));
            if (!ctx->filters[i]) {
                av_log(ctx, AV_LOG_ERROR, "Could not create filter \"%s\".\n", f->key);
                return AVERROR(EINVAL);
            }

            i++;
        }
    } else {
        av_log(ctx, AV_LOG_ERROR, "No filters specified.\n");
        return AVERROR(EINVAL);
    }

    // create GPU context on OSX
    const NSOpenGLPixelFormatAttribute attr[] = {
        NSOpenGLPFAAccelerated,
        NSOpenGLPFANoRecovery,
        NSOpenGLPFAColorSize, 32,
        0
    };
    NSOpenGLPixelFormat *pixel_format = [[NSOpenGLPixelFormat alloc] initWithAttributes:(void *)&attr];

    ctx->color_space = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGB);
    ctx->glctx       = CFBridgingRetain([CIContext contextWithCGLContext:CGLGetCurrentContext()
                                                              pixelFormat:[pixel_format CGLPixelFormatObj]
                                                               colorSpace:ctx->color_space
                                                                  options:nil]);
    if (!ctx->glctx) {
        av_log(ctx, AV_LOG_ERROR, "CIContext not created.\n");
        return AVERROR_EXTERNAL;
    }

    // Creating an empty input image as input container for the context
    ctx->input_image = CFBridgingRetain([CIImage emptyImage]);

    return 0;
}
static av_cold int init_src(AVFilterContext *fctx)
{
    CoreImageContext *ctx = fctx->priv;

    ctx->is_video_source = 1;
    ctx->time_base       = av_inv_q(ctx->frame_rate);
    ctx->pts             = 0;

    return init(fctx);
}
static av_cold void uninit(AVFilterContext *fctx)
{
#define SafeCFRelease(ptr) do { \
    if (ptr) {                  \
        CFRelease(ptr);         \
        ptr = NULL;             \
    }                           \
} while (0)

    CoreImageContext *ctx = fctx->priv;

    SafeCFRelease(ctx->glctx);
    SafeCFRelease(ctx->cgctx);
    SafeCFRelease(ctx->color_space);
    SafeCFRelease(ctx->input_image);

    if (ctx->filters) {
        for (int i = 0; i < ctx->num_filters; i++) {
            SafeCFRelease(ctx->filters[i]);
        }
        av_freep(&ctx->filters);
    }

    av_frame_free(&ctx->picref);
}
static const AVFilterPad vf_coreimage_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad vf_coreimage_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad vsrc_coreimagesrc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};
#define OFFSET(x) offsetof(CoreImageContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define GENERATOR_OPTIONS                                                                                                    \
    {"size",     "set video size",                OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0,         FLAGS}, \
    {"s",        "set video size",                OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "320x240"}, 0, 0,         FLAGS}, \
    {"rate",     "set video rate",                OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"},      0, 0,         FLAGS}, \
    {"r",        "set video rate",                OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"},      0, 0,         FLAGS}, \
    {"duration", "set video duration",            OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},       -1, INT64_MAX, FLAGS}, \
    {"d",        "set video duration",            OFFSET(duration),   AV_OPT_TYPE_DURATION,   {.i64 = -1},       -1, INT64_MAX, FLAGS}, \
    {"sar",      "set video sample aspect ratio", OFFSET(sar),        AV_OPT_TYPE_RATIONAL,   {.dbl = 1},         0, INT_MAX,   FLAGS},

#define FILTER_OPTIONS                                                                                                                  \
    {"list_filters",    "list available filters",                OFFSET(list_filters),    AV_OPT_TYPE_BOOL,   {.i64 = 0},    0, 1, .flags = FLAGS}, \
    {"list_generators", "list available generators",             OFFSET(list_generators), AV_OPT_TYPE_BOOL,   {.i64 = 0},    0, 1, .flags = FLAGS}, \
    {"filter",          "names and options of filters to apply", OFFSET(filter_string),   AV_OPT_TYPE_STRING, {.str = NULL},       .flags = FLAGS}, \
    {"output_rect",     "output rectangle within output image",  OFFSET(output_rect),     AV_OPT_TYPE_STRING, {.str = NULL},       .flags = FLAGS},
// definitions for coreimage video filter
static const AVOption coreimage_options[] = {
    FILTER_OPTIONS
    { NULL }
};

AVFILTER_DEFINE_CLASS(coreimage);

AVFilter ff_vf_coreimage = {
    .name          = "coreimage",
    .description   = NULL_IF_CONFIG_SMALL("Video filtering using CoreImage API."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(CoreImageContext),
    .priv_class    = &coreimage_class,
    .inputs        = vf_coreimage_inputs,
    .outputs       = vf_coreimage_outputs,
    .query_formats = query_formats,
};
// definitions for coreimagesrc video source
static const AVOption coreimagesrc_options[] = {
    GENERATOR_OPTIONS
    FILTER_OPTIONS
    { NULL }
};

AVFILTER_DEFINE_CLASS(coreimagesrc);

AVFilter ff_vsrc_coreimagesrc = {
    .name          = "coreimagesrc",
    .description   = NULL_IF_CONFIG_SMALL("Video source using image generators of CoreImage API."),
    .init          = init_src,
    .uninit        = uninit,
    .priv_size     = sizeof(CoreImageContext),
    .priv_class    = &coreimagesrc_class,
    .inputs        = NULL,
    .outputs       = vsrc_coreimagesrc_outputs,
    .query_formats = query_formats_src,
};
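
/* Example invocation of the source (illustrative; shell quoting may vary):
 *
 *   ffmpeg -f lavfi -i coreimagesrc=s=320x240:filter=CIQRCodeGenerator@inputMessage=https\\://FFmpeg.org/ OUTPUT
 */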