Go to the documentation of this file.
30 #include <libplacebo/renderer.h>
31 #include <libplacebo/utils/libav.h>
32 #include <libplacebo/utils/frame_queue.h>
33 #include <libplacebo/vulkan.h>
39 return frame->user_data;
212 #if FF_API_LIBPLACEBO_OPTS
220 int tonemapping_mode;
270     av_log(log_ctx, av_lev, "%s\n", msg);
277 #if PL_API_VER >= 246
295 switch (gamut_mode) {
296 #if PL_API_VER >= 269
297     case GAMUT_MAP_CLIP: p->gamut_mapping = &pl_gamut_map_clip; return;
322                        const struct pl_filter_config **opt,
323                        const char *name, int frame_mixing)
325     const struct pl_filter_preset *preset, *presets_avail;
326 presets_avail = frame_mixing ? pl_frame_mixers : pl_scale_filters;
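Note: presets_avail above selects between libplacebo's built-in preset tables, pl_frame_mixers for frame mixing and pl_scale_filters for scaling, and the "help" branch below prints their names. As a rough, self-contained sketch (assuming, as the loop in the filter does, that the tables end with an entry whose name is NULL):

    #include <stdio.h>
    #include <libplacebo/filters.h>

    /* Sketch: list the names of libplacebo's built-in scaler presets. */
    static void list_scalers(void)
    {
        for (const struct pl_filter_preset *p = pl_scale_filters; p->name; p++)
            printf("    %s\n", p->name);
    }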
328     if (!strcmp(name, "help")) {
350     int gamut_mode = s->gamut_mode;
351     float hybrid_mix = s->hybrid_mix;
352 uint8_t color_rgba[4];
356 #if FF_API_LIBPLACEBO_OPTS
358     switch (s->tonemapping_mode) {
360         if (s->desat_str >= 0.0f)
361             hybrid_mix = s->desat_str;
363     case 1: hybrid_mix = 1.0f; break;
364     case 2: hybrid_mix = 0.2f; break;
365     case 3: hybrid_mix = 0.0f; break;
366     case 4: hybrid_mix = 0.0f; break;
375     if (s->gamut_warning)
377     if (s->gamut_clipping)
381     s->deband_params = *pl_deband_params(
382         .iterations = s->deband_iterations,
383         .threshold = s->deband_threshold,
384         .radius = s->deband_radius,
385         .grain = s->deband_grain,
388     s->color_adjustment = (struct pl_color_adjustment) {
389         .brightness = s->brightness,
390         .contrast = s->contrast,
391         .saturation = s->saturation,
396     s->peak_detect_params = *pl_peak_detect_params(
397         .smoothing_period = s->smoothing,
398         .minimum_peak = s->min_peak,
399         .scene_threshold_low = s->scene_low,
400         .scene_threshold_high = s->scene_high,
401 #if PL_API_VER >= 263
402         .percentile = s->percentile,
405         .overshoot_margin = s->overshoot,
409     s->color_map_params = *pl_color_map_params(
410 #if PL_API_VER >= 269
411         .hybrid_mix = hybrid_mix,
413         .tone_mapping_mode = s->tonemapping_mode,
414         .tone_mapping_crosstalk = s->crosstalk,
417         .tone_mapping_param = s->tonemapping_param,
418         .inverse_tone_mapping = s->inverse_tonemapping,
419         .lut_size = s->tonemapping_lut_size,
424     s->dither_params = *pl_dither_params(
425         .method = s->dithering,
426         .lut_size = s->dither_lut_size,
427         .temporal = s->dither_temporal,
430     s->cone_params = *pl_cone_params(
432         .strength = s->cone_str,
435     s->params = *pl_render_params(
436         .lut_entries = s->lut_entries,
437         .antiringing_strength = s->antiringing,
438         .background_transparency = 1.0f - (float) color_rgba[3] / UINT8_MAX,
439         .background_color = {
440             (float) color_rgba[0] / UINT8_MAX,
441             (float) color_rgba[1] / UINT8_MAX,
442             (float) color_rgba[2] / UINT8_MAX,
444 #if PL_API_VER >= 277
445         .corner_rounding = s->corner_rounding,
448         .deband_params = s->deband ? &s->deband_params : NULL,
449         .sigmoid_params = s->sigmoid ? &pl_sigmoid_default_params : NULL,
450         .color_adjustment = &s->color_adjustment,
451         .peak_detect_params = s->peakdetect ? &s->peak_detect_params : NULL,
452         .color_map_params = &s->color_map_params,
453         .dither_params = s->dithering >= 0 ? &s->dither_params : NULL,
454         .cone_params = s->cones ? &s->cone_params : NULL,
457         .num_hooks = s->num_hooks,
459         .skip_anti_aliasing = s->skip_aa,
460         .skip_caching_single_frame = s->skip_cache,
461         .polar_cutoff = s->polar_cutoff,
462         .disable_linear_scaling = s->disable_linear,
463         .disable_builtin_scalers = s->disable_builtin,
464         .force_dither = s->force_dither,
465         .disable_fbos = s->disable_fbos,
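Note: the sub-parameter structs assembled above are only referenced (not copied) by pl_render_params, so they are kept in the filter context for the lifetime of the renderer. A minimal sketch of the same pattern outside the filter, with hypothetical option values, might look like this:

    #include <stddef.h>
    #include <libplacebo/renderer.h>

    /* Hypothetical persistent state mirroring what the filter keeps in its context. */
    struct my_state {
        struct pl_deband_params    deband_params;
        struct pl_color_map_params color_map_params;
        struct pl_dither_params    dither_params;
        struct pl_render_params    params;
    };

    static void setup_params(struct my_state *st, int enable_deband)
    {
        st->deband_params    = *pl_deband_params(.iterations = 2, .threshold = 4.0);
        st->color_map_params = *pl_color_map_params();  /* library defaults */
        st->dither_params    = *pl_dither_params(.method = PL_DITHER_BLUE_NOISE);

        st->params = *pl_render_params(
            /* These are pointers into `st`, valid for as long as `st` lives. */
            .deband_params    = enable_deband ? &st->deband_params : NULL,
            .color_map_params = &st->color_map_params,
            .dither_params    = &st->dither_params,
        );
    }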
480 const struct pl_hook *hook;
482     hook = pl_mpv_user_shader_parse(s->gpu, shader, len);
488     s->hooks[s->num_hooks++] = hook;
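Note: pl_mpv_user_shader_parse() above turns an mpv-style custom shader into a pl_hook, which the filter later exposes through pl_render_params.hooks. A rough standalone sketch of the same idea (error handling trimmed; `gpu` is assumed to be an existing pl_gpu, and attach_user_shader is a hypothetical helper):

    #include <string.h>
    #include <libplacebo/renderer.h>
    #include <libplacebo/shaders/custom.h>

    /* Sketch: parse one user shader and hand it to the renderer via pl_render_params. */
    static bool attach_user_shader(pl_gpu gpu, const char *shader_text,
                                   struct pl_render_params *params,
                                   const struct pl_hook **slot)
    {
        const struct pl_hook *hook = pl_mpv_user_shader_parse(gpu, shader_text,
                                                              strlen(shader_text));
        if (!hook)
            return false;

        *slot = hook;   /* caller owns it; release with pl_mpv_user_shader_destroy() */
        params->hooks = slot;
        params->num_hooks = 1;
        return true;
    }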
500 s->log = pl_log_create(PL_API_VER, pl_log_params(
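Note: the pl_log_create() call opened above installs pl_av_log (shown earlier) as the log callback, so libplacebo messages end up in av_log(). As a self-contained illustration, not the filter's exact code, a standalone program could install a similar callback; the level-to-prefix mapping below is an assumption made for the example:

    #include <stdio.h>
    #include <libplacebo/log.h>

    /* Hypothetical callback: print libplacebo messages to stderr with a prefix. */
    static void example_log_cb(void *priv, enum pl_log_level level, const char *msg)
    {
        const char *tag = level <= PL_LOG_ERR  ? "error" :
                          level == PL_LOG_WARN ? "warn"  : "info";
        fprintf(stderr, "[libplacebo/%s] %s\n", tag, msg);
    }

    int main(void)
    {
        pl_log log = pl_log_create(PL_API_VER, pl_log_params(
            .log_cb    = example_log_cb,
            .log_level = PL_LOG_INFO,
        ));
        /* ... create a GPU context, render, etc. ... */
        pl_log_destroy(&log);
        return 0;
    }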
509     if (s->out_format_string) {
513                s->out_format_string);
541     if (strcmp(s->fps_string, "none") != 0)
551 #if PL_API_VER >= 278
552 static void lock_queue(void *priv, uint32_t qf, uint32_t qidx)
559 static void unlock_queue(void *priv, uint32_t qf, uint32_t qidx)
575 #if PL_API_VER >= 278
577     s->vulkan = pl_vulkan_import(s->log, pl_vulkan_import_params(
578         .instance = hwctx->inst,
589             .index = hwctx->queue_family_index,
590             .count = hwctx->nb_graphics_queues,
593             .index = hwctx->queue_family_comp_index,
594             .count = hwctx->nb_comp_queues,
597             .index = hwctx->queue_family_tx_index,
598             .count = hwctx->nb_tx_queues,
601         .max_api_version = VK_API_VERSION_1_3,
605            "Vulkan device, remove it or upgrade libplacebo to >= 5.278\n",
611     s->vulkan = pl_vulkan_create(s->log, pl_vulkan_params(
618            hwctx ? "importing" : "creating");
624     s->gpu = s->vulkan->gpu;
625     s->renderer = pl_renderer_create(s->log, s->gpu);
626     s->queue = pl_queue_create(s->gpu);
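Note: the import path above wraps an existing AVVulkanDeviceContext, while the fallback at line 611 lets libplacebo create its own Vulkan device. A minimal sketch of the fallback path (standalone context, library defaults, no FFmpeg device involved):

    #include <libplacebo/log.h>
    #include <libplacebo/vulkan.h>
    #include <libplacebo/renderer.h>
    #include <libplacebo/utils/frame_queue.h>

    /* Sketch: create a standalone libplacebo Vulkan context, renderer and frame queue. */
    int main(void)
    {
        pl_log log = pl_log_create(PL_API_VER, pl_log_params(.log_level = PL_LOG_WARN));
        pl_vulkan vk = pl_vulkan_create(log, pl_vulkan_params());  /* library defaults */
        if (!vk)
            return 1;

        pl_gpu gpu       = vk->gpu;
        pl_renderer rend = pl_renderer_create(log, gpu);
        pl_queue queue   = pl_queue_create(gpu);

        /* ... push frames, update the queue, render ... */

        pl_queue_destroy(&queue);
        pl_renderer_destroy(&rend);
        pl_vulkan_destroy(&vk);
        pl_log_destroy(&log);
        return 0;
    }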
629     if (s->shader_bin_len)
632     if (s->shader_path && s->shader_path[0]) {
649         pl_tex_destroy(s->gpu, &s->tex[i]);
650     for (int i = 0; i < s->num_hooks; i++)
651         pl_mpv_user_shader_destroy(&s->hooks[i]);
652     pl_renderer_destroy(&s->renderer);
653     pl_queue_destroy(&s->queue);
654     pl_vulkan_destroy(&s->vulkan);
655     pl_log_destroy(&s->log);
671                                        const char *arg, char *res, int res_len,
684                          struct pl_frame_mix *mix, struct pl_frame *target,
685                          uint64_t ref_sig, double target_pts)
689     for (int i = 0; i < mix->num_frames; i++) {
692         struct pl_frame *image = (struct pl_frame *) mix->frames[i];
694         double image_pts = src->pts * av_q2d(ctx->inputs[0]->time_base);
699         s->var_values[VAR_N] = ctx->outputs[0]->frame_count_out;
719         image->crop.x1 = image->crop.x0 + s->var_values[VAR_CROP_W];
720         image->crop.y1 = image->crop.y0 + s->var_values[VAR_CROP_H];
722         if (mix->signatures[i] == ref_sig) {
726             target->crop.x1 = target->crop.x0 + s->var_values[VAR_POS_W];
727             target->crop.y1 = target->crop.y0 + s->var_values[VAR_POS_H];
729             if (s->target_sar.num) {
730                 float aspect = pl_rect2df_aspect(&target->crop) * av_q2d(s->target_sar);
731                 pl_rect2df_aspect_set(&target->crop, aspect, s->pad_crop_ratio);
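Note: pl_rect2df_aspect_set() above forces the target crop to a given aspect ratio, with the third argument blending between padding (0.0, letterbox) and cropping (1.0). A tiny hypothetical example of the same helper:

    #include <libplacebo/common.h>
    #include <libplacebo/renderer.h>

    /* Sketch: fit a target crop to 16:9, half padding and half cropping. */
    static void fit_16_9(struct pl_frame *target)
    {
        const float aspect = 16.0f / 9.0f;   /* desired display aspect ratio */
        const float pad_crop_ratio = 0.5f;   /* 0.0 = pad only, 1.0 = crop only */
        pl_rect2df_aspect_set(&target->crop, aspect, pad_crop_ratio);
    }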
739                             struct pl_frame_mix *mix,
742     int err = 0, ok, changed_csp;
746     struct pl_frame target;
750     if (!mix->num_frames)
758     for (int i = 0; i < mix->num_frames; i++) {
759         if (i && mix->timestamps[i] > 0.0f)
762             ref_sig = mix->signatures[i];
767     out->width = outlink->w;
768     out->height = outlink->h;
780     if (s->colorspace >= 0)
781         out->colorspace = s->colorspace;
782     if (s->color_range >= 0)
783         out->color_range = s->color_range;
784     if (s->color_trc >= 0)
785         out->color_trc = s->color_trc;
786     if (s->color_primaries >= 0)
787         out->color_primaries = s->color_primaries;
796     changed_csp = ref->colorspace      != out->colorspace      ||
797                   ref->color_range     != out->color_range     ||
798                   ref->color_trc       != out->color_trc       ||
799                   ref->color_primaries != out->color_primaries;
807     if (s->apply_dovi || changed_csp) {
811     if (s->apply_filmgrain)
816     ok = pl_map_avframe_ex(s->gpu, &target, pl_avframe_params(
821         ok = pl_frame_recreate_from_avframe(s->gpu, &target, s->tex, out);
829     pl_render_image_mix(s->renderer, mix, &target, &s->params);
832         pl_unmap_avframe(s->gpu, &target);
833     } else if (!pl_download_avframe(s->gpu, &target, out)) {
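Note: the fragments above are the core of output_frame_mix(): map or recreate the destination AVFrame as a pl_frame, render the mix into it, then unmap or download. A condensed sketch of that flow under the assumption that a gpu, renderer, frame mix, plane textures and output AVFrame already exist:

    #include <libplacebo/renderer.h>
    #include <libplacebo/utils/libav.h>

    /* Sketch: render a frame mix into an AVFrame, using GPU mapping when possible. */
    static int render_to_avframe(pl_gpu gpu, pl_renderer rr,
                                 const struct pl_frame_mix *mix,
                                 const struct pl_render_params *params,
                                 pl_tex tex[4], AVFrame *out)
    {
        struct pl_frame target;
        bool mapped = pl_map_avframe_ex(gpu, &target, pl_avframe_params(.frame = out));
        if (!mapped && !pl_frame_recreate_from_avframe(gpu, &target, tex, out))
            return -1;                   /* could not represent the output format */

        if (!pl_render_image_mix(rr, mix, &target, params))
            return -1;

        if (mapped)
            pl_unmap_avframe(gpu, &target);
        else if (!pl_download_avframe(gpu, &target, out))
            return -1;                   /* read-back to system memory failed */
        return 0;
    }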
845                       const struct pl_source_frame *src,
846                       struct pl_frame *out)
850     bool ok = pl_map_avframe_ex(gpu, out, pl_avframe_params(
853         .map_dovi = s->apply_dovi,
856     if (!s->apply_filmgrain)
857         out->film_grain.type = PL_FILM_GRAIN_NONE;
864                         const struct pl_source_frame *src)
866     pl_unmap_avframe(gpu, frame);
889     pl_queue_push(s->queue, &(struct pl_source_frame) {
890         .pts = in->pts * av_q2d(inlink->time_base),
891         .duration = in->duration * av_q2d(inlink->time_base),
892         .first_field = pl_field_from_avframe(in),
895         .unmap = unmap_frame,
896         .discard = discard_frame,
912     pl_queue_push(s->queue, NULL);
918     struct pl_frame_mix mix;
919     enum pl_queue_status ret;
933     if (s->status && pts >= s->status_pts) {
938     ret = pl_queue_update(s->queue, &mix, pl_queue_params(
940         .radius = pl_frame_mix_radius(&s->params),
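Note: pl_queue_push() and pl_queue_update() above form a producer/consumer pattern: decoded frames are pushed as pl_source_frame entries (a NULL push signals EOF), and the queue then interpolates a pl_frame_mix for each requested output PTS. A schematic consumer step, with the vsync duration left as a caller-supplied assumption (e.g. 1.0 / 60 for 60 fps output):

    #include <libplacebo/utils/frame_queue.h>

    /* Sketch: pull one interpolated frame mix for a given output timestamp. */
    static enum pl_queue_status pull_mix(pl_queue queue,
                                         const struct pl_render_params *params,
                                         double out_pts, float vsync_dur,
                                         struct pl_frame_mix *mix)
    {
        return pl_queue_update(queue, mix, pl_queue_params(
            .pts            = out_pts,                     /* timestamp to interpolate at */
            .radius         = pl_frame_mix_radius(params), /* how far to look around out_pts */
            .vsync_duration = vsync_dur,
        ));
    }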
973     if (ctx->hw_device_ctx) {
976         vkhwctx = avhwctx->hwctx;
992         if (!vkhwctx || vkhwctx->act_dev != s->vulkan->device)
996     if (!pl_test_pixfmt(s->gpu, pixfmt))
1009     } else if (pixfmt == s->out_format) {
1019 if (!infmts || !outfmts) {
1020         if (s->out_format) {
1035 if (outfmts && !outfmts->refcount)
1049     s->vkctx.input_format = inlink->format;
1068                                      &outlink->w, &outlink->h));
1071                                      s->force_original_aspect_ratio,
1072                                      s->force_divisible_by);
1075     if (inlink->sample_aspect_ratio.num)
1078     if (s->normalize_sar) {
1081         s->target_sar = scale_sar;
1086     if (inlink->sample_aspect_ratio.num)
1096         s->skip_cache = true;
1117     s->vkctx.output_width = outlink->w;
1118     s->vkctx.output_height = outlink->h;
1121         s->vkctx.output_format = s->vkctx.input_format;
1123         s->vkctx.output_format = s->out_format;
1128     vkfc->usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
1136 #define OFFSET(x) offsetof(LibplaceboContext, x)
1137 #define STATIC (AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
1138 #define DYNAMIC (STATIC | AV_OPT_FLAG_RUNTIME_PARAM)
1153     { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 2, STATIC, "force_oar" },
1157     { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 256, STATIC },
1158     { "normalize_sar", "force SAR normalization to 1:1 by adjusting pos_x/y/w/h", OFFSET(normalize_sar), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, STATIC },
1159     { "pad_crop_ratio", "ratio between padding and cropping when normalizing SAR (0=pad, 1=crop)", OFFSET(pad_crop_ratio), AV_OPT_TYPE_FLOAT, { .dbl = 0.0 }, 0.0, 1.0, DYNAMIC },
1164     { "auto", "keep the same colorspace", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, STATIC, "colorspace" },
1188     { "auto", "keep the same color primaries", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, STATIC, "color_primaries" },
1204     { "auto", "keep the same color transfer", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, STATIC, "color_trc" },
1261 #if PL_API_VER >= 246
1273     { "tonemapping_param", "Tunable parameter for some tone-mapping functions", OFFSET(tonemapping_param), AV_OPT_TYPE_FLOAT, { .dbl = 0.0 }, 0.0, 100.0, .flags = DYNAMIC },
1274     { "inverse_tonemapping", "Inverse tone mapping (range expansion)", OFFSET(inverse_tonemapping), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DYNAMIC },
1278 #if FF_API_LIBPLACEBO_OPTS
1286     { "relative", "Relative colorimetric", 0, AV_OPT_TYPE_CONST, { .i64 = PL_INTENT_RELATIVE_COLORIMETRIC }, 0, 0, STATIC, "intent" },
1287     { "absolute", "Absolute colorimetric", 0, AV_OPT_TYPE_CONST, { .i64 = PL_INTENT_ABSOLUTE_COLORIMETRIC }, 0, 0, STATIC, "intent" },
1288     { "saturation", "Saturation mapping", 0, AV_OPT_TYPE_CONST, { .i64 = PL_INTENT_SATURATION }, 0, 0, STATIC, "intent" },
1299     { "dithering", "Dither method to use", OFFSET(dithering), AV_OPT_TYPE_INT, { .i64 = PL_DITHER_BLUE_NOISE }, -1, PL_DITHER_METHOD_COUNT - 1, DYNAMIC, "dither" },
1303     { "ordered_fixed", "Fixed function ordered", 0, AV_OPT_TYPE_CONST, { .i64 = PL_DITHER_ORDERED_FIXED }, 0, 0, STATIC, "dither" },
1322 #if FF_API_LIBPLACEBO_OPTS
1349     .name = "libplacebo",
1359 .priv_class = &libplacebo_class,