89 "type:%s start_frame:%d nb_frames:%d alpha:%d\n",
95 "type:%s start_time:%f duration:%f alpha:%d\n",
filter_rgb() blends each packed-RGB pixel towards the fade color in
16.16 fixed point:

    static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame,
                                            int slice_start, int slice_end,
                                            int do_alpha, int step)
    ...
    for (i = slice_start; i < slice_end; i++) {
        uint8_t *p = frame->data[0] + i * frame->linesize[0];
        for (j = 0; j < frame->width; j++) {
    #define INTERP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)p[c_name] - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
            p[r_idx] = INTERP(r_idx, 0);
            p[g_idx] = INTERP(g_idx, 1);
            p[b_idx] = INTERP(b_idx, 2);
            if (do_alpha)
                p[a_idx] = INTERP(a_idx, 3);
            p += step;
        }
    }
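The macro is a straight linear interpolation in Q16: factor 0 yields the
fade color, 1 << 16 yields the untouched pixel, and the 1 << 15 term
rounds to nearest. A minimal standalone sketch of the same arithmetic
(the helper name is hypothetical, not part of the filter):

    #include <stdint.h>
    #include <stdio.h>

    /* Q16 blend: factor == 0 returns c (the fade color), factor == 1<<16 returns p. */
    static uint8_t blend_q16(uint8_t p, uint8_t c, int factor)
    {
        int v = ((c << 16) + (p - c) * factor + (1 << 15)) >> 16;
        return v < 0 ? 0 : v > 255 ? 255 : v;   /* same clamp as av_clip_uint8() */
    }

    int main(void)
    {
        /* fading a gray pixel (200) towards black (0) */
        printf("%d\n", blend_q16(200, 0, 0));       /* 0   : fully faded */
        printf("%d\n", blend_q16(200, 0, 1 << 15)); /* 100 : halfway     */
        printf("%d\n", blend_q16(200, 0, 1 << 16)); /* 200 : no fade     */
        return 0;
    }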
filter_rgb_planar() applies the same blend per plane; its INTERPP variant
takes the sample value itself rather than an offset into the row:

    for (i = slice_start; i < slice_end; i++) {
        ...
        for (j = 0; j < frame->width; j++) {
    #define INTERPP(c_name, c_idx) av_clip_uint8(((c[c_idx]<<16) + ((int)c_name - (int)c[c_idx]) * s->factor + (1<<15)) >> 16)
            ...
        }
    }

filter_slice_rgb() computes its slice bounds and dispatches on the pixel
layout:

    int slice_start = (frame->height * jobnr) / nb_jobs;
    ...
    else if (s->bpp == 3) filter_rgb(s, frame, slice_start, slice_end, 0, 3);
    else if (s->bpp == 4) filter_rgb(s, frame, slice_start, slice_end, 0, 4);
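The height * jobnr / nb_jobs split hands each job a contiguous band of
rows; consecutive jobs tile the frame with no gaps or overlaps even when
the height is not divisible by the job count. A small self-check of that
property (demo code, not from the filter):

    #include <assert.h>

    int main(void)
    {
        int height = 1080, nb_jobs = 7, covered = 0;
        for (int jobnr = 0; jobnr < nb_jobs; jobnr++) {
            int slice_start = (height *  jobnr     ) / nb_jobs;
            int slice_end   = (height * (jobnr + 1)) / nb_jobs;
            assert(slice_start == covered);   /* no gap, no overlap */
            covered = slice_end;
        }
        assert(covered == height);            /* every row processed exactly once */
        return 0;
    }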
filter_slice_luma() walks every byte of its band of the luma plane:

    int slice_start = (frame->height * jobnr) / nb_jobs;
    ...
    for (i = slice_start; i < slice_end; i++) {
        ...
        for (j = 0; j < frame->width * s->bpp; j++) {
filter_slice_luma16() is the 16-bit variant. linesize[] is in bytes, so
the row address is computed on the byte pointer before the cast:

    int slice_start = (frame->height * jobnr) / nb_jobs;
    ...
    for (i = slice_start; i < slice_end; i++) {
        uint16_t *p = (uint16_t *)(frame->data[k] + i * frame->linesize[k]);
        for (j = 0; j < frame->width * s->bpp; j++) {
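That pattern, stride arithmetic on the uint8_t pointer with the cast
applied afterwards, is the standard way to index high-bit-depth planes;
a tiny sketch with a hypothetical buffer:

    #include <stdint.h>
    #include <stddef.h>

    /* Row i of a 16-bit plane whose stride is given in bytes. */
    static uint16_t *row16(uint8_t *data, int linesize, int i)
    {
        return (uint16_t *)(data + (ptrdiff_t)i * linesize);
    }

    int main(void)
    {
        uint16_t backing[4 * 32] = {0};              /* 4 rows, 64-byte stride */
        row16((uint8_t *)backing, 64, 2)[5] = 1023;  /* sample at row 2, column 5 */
        return 0;
    }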
filter_slice_chroma() re-centres chroma samples around 128 before scaling,
then re-adds the bias together with a rounding term:

    int slice_start = (height * jobnr) / nb_jobs;
    ...
    for (plane = 1; plane < 3; plane++) {
        for (i = slice_start; i < slice_end; i++) {
            ...
            for (j = 0; j < width; j++) {
                *p = ((*p - 128) * s->factor + 8421367) >> 16;
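The added constant is 128.5 in Q16, to within a few units (the exact form
used by the 16-bit variant below is ((mid << 1) + 1) << 15): it restores
the 128 bias and rounds to nearest after the shift. Checking that the
transform is the identity at unity factor (demo code):

    #include <stdio.h>

    int main(void)
    {
        /* With factor == 1 << 16 (no fade), every chroma value maps to itself. */
        for (int p = 0; p < 256; p++) {
            int out = ((p - 128) * (1 << 16) + 8421367) >> 16;
            if (out != p)
                printf("mismatch: %d -> %d\n", p, out);
        }
        return 0;                          /* prints nothing: identity holds */
    }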
filter_slice_chroma16() generalizes the bias and the rounding constant to
any bit depth:

    const int mid = 1 << (s->depth - 1);
    const int add = ((mid << 1) + 1) << 15;
    int slice_start = (height * jobnr) / nb_jobs;
    ...
    for (plane = 1; plane < 3; plane++) {
        for (i = slice_start; i < slice_end; i++) {
            uint16_t *p = (uint16_t *)(frame->data[plane] + i * frame->linesize[plane]);
            for (j = 0; j < width; j++) {
                *p = ((*p - mid) * s->factor + add) >> 16;
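At depth 10, for example, mid is 512 and add is 1025 << 15 = 33587200,
i.e. 512.5 in Q16; the identity check from the 8-bit case carries over
(demo code):

    #include <assert.h>

    int main(void)
    {
        int depth = 10;
        int mid = 1 << (depth - 1);        /* 512: neutral chroma at 10 bits */
        int add = ((mid << 1) + 1) << 15;  /* 33587200 == 512.5 in Q16       */
        assert(mid == 512 && add == 33587200);
        assert(((((700 - mid) * (1 << 16)) + add) >> 16) == 700);
        return 0;
    }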
filter_slice_alpha() and filter_slice_alpha16() run the same scaling over
the alpha component only:

    int slice_start = (frame->height * jobnr) / nb_jobs;
    ...
    for (i = slice_start; i < slice_end; i++) {
        ...
        for (j = 0; j < frame->width; j++) {
filter_frame() only dispatches the slice jobs while the fade is still in
progress:

    if (s->factor < UINT16_MAX) {
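s->factor is a Q16 gain with UINT16_MAX treated as unity (frame fully
untouched), so the guard skips all per-pixel work once a fade-in has
completed. How such a factor can be derived from the frame counter, as an
illustrative sketch only (the helper below is not the filter's exact code):

    #include <stdint.h>

    /* Map a frame index to a Q16 fade-in factor:
     * 0 -> frame is entirely the fade color, UINT16_MAX -> frame untouched. */
    static unsigned fade_in_factor(int frame_index, int start_frame, int nb_frames)
    {
        if (frame_index <= start_frame)
            return 0;
        if (frame_index >= start_frame + nb_frames)
            return UINT16_MAX;
        return (unsigned)((int64_t)(frame_index - start_frame) * UINT16_MAX / nb_frames);
    }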
Each option is registered twice, under a long and a short name, both
pointing at the same FadeContext field:

    #define OFFSET(x) offsetof(FadeContext, x)
    #define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

    static const AVOption fade_options[] = {
        ...
        { "start_frame", "Number of the first frame to which to apply the effect.", ... },
        { "s",           "Number of the first frame to which to apply the effect.", ... },
        { "nb_frames",   "Number of frames to which the effect should be applied.", ... },
        { "n",           "Number of frames to which the effect should be applied.", ... },
        { "start_time",  "Number of seconds of the beginning of the effect.",       ... },
        { "st",          "Number of seconds of the beginning of the effect.",       ... },
        { "duration",    "Duration of the effect in seconds.",                      ... },
        { "d",           "Duration of the effect in seconds.",                      ... },
        ...
    };
The filter definition then ties the class, pads, and callbacks together:

    .priv_class = &fade_class,
    ...
    .inputs     = avfilter_vf_fade_inputs,
    .outputs    = avfilter_vf_fade_outputs,
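These members live in the usual AVFilter registration struct. A sketch of
its likely shape, assembled only from symbols that appear in this listing
(the exact field values are an assumption):

    AVFilter ff_vf_fade = {
        .name          = "fade",
        .description   = NULL_IF_CONFIG_SMALL("Fade in/out input video."),
        .init          = init,
        .priv_size     = sizeof(FadeContext),
        .priv_class    = &fade_class,
        .query_formats = query_formats,
        .inputs        = avfilter_vf_fade_inputs,
        .outputs       = avfilter_vf_fade_outputs,
        .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    };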
Symbols referenced by the listing (generated cross-reference):

#define AV_PIX_FMT_YUVA422P16
#define AV_PIX_FMT_YUVA422P9
const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
AVFrame - This structure describes decoded (raw) audio or video data.
static const AVFilterPad avfilter_vf_fade_outputs[]
#define AV_PIX_FMT_YUVA420P10
#define AV_PIX_FMT_YUV444P14
#define AV_PIX_FMT_YUVA422P10
AV_PIX_FMT_YUV444P - planar YUV 4:4:4, 24bpp (1 Cr & Cb sample per 1x1 Y samples)
#define INTERPP(c_name, c_idx)
avfilter.h - Main libavfilter public API header.
AV_PIX_FMT_RGB24 - packed RGB 8:8:8, 24bpp, RGBRGB...
int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc) - Return the number of bits per pixel used by the pixel format described by pixdesc.
#define AV_PIX_FMT_YUV420P12
static const AVOption fade_options[]
static int filter_slice_chroma16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
uint8_t log2_chroma_w - Amount to shift the luma width right to find the chroma width.
#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC - Some filters support a generic "enable" expression option that can be used to enable or disable a filter.
const char *name - Pad name.
#define av_assert0(cond) - assert() equivalent that is always enabled.
int ff_filter_frame(AVFilterLink *link, AVFrame *frame) - Send a frame of data to the next filter.
AV_PIX_FMT_YUVA420P - planar YUV 4:2:0, 20bpp (1 Cr & Cb sample per 2x2 Y & A samples)
AVComponentDescriptor comp[4] - Parameters that describe how pixels are packed.
#define AV_PIX_FMT_FLAG_ALPHA - The pixel format has an alpha channel.
int64_t pts - Presentation timestamp in time_base units (time when frame should be shown to user).
#define INTERP(c_name, c_idx)
#define AV_PIX_FMT_YUVA420P9
AV_PIX_FMT_ABGR - packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
int black_fade - if color_rgba is black
AV_PIX_FMT_YUVJ440P - planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
AV_PIX_FMT_YUVJ422P - planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
#define AV_LOG_VERBOSE - Detailed information.
#define AV_PIX_FMT_YUV444P16
#define AV_PIX_FMT_YUV422P12
#define AV_PIX_FMT_YUVA420P16
static av_always_inline void filter_rgb(FadeContext *s, const AVFrame *frame, int slice_start, int slice_end, int do_alpha, int step)
AVFilterPad - A filter pad used for either input or output.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) - Rescale a 64-bit integer by 2 rational numbers.
AVFilterLink - A link between two filters.
static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVA422P - planar YUV 4:2:2, 24bpp (1 Cr & Cb sample per 2x1 Y & A samples)
uint8_t log2_chroma_h - Amount to shift the luma height right to find the chroma height.
#define AV_PIX_FMT_FLAG_RGB - The pixel format contains RGB-like data (as opposed to YUV/grayscale).
static av_cold int init(AVFilterContext *ctx)
int (*filter_slice_chroma)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
#define NULL_IF_CONFIG_SMALL(x) - Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
AV_PIX_FMT_BGRA - packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
void *priv - private data for use by the filter
#define AVFILTER_FLAG_SLICE_THREADS - The filter supports multithreading by splitting frames into multiple parts and processing them concurrently.
#define AV_PIX_FMT_YUVA444P16
AVRational time_base - Define the time base used by the PTS of the frames/samples which will pass through this link.
avassert.h - simple assert() macros that are a bit more flexible than ISO C assert().
#define AV_PIX_FMT_YUV444P10
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
AV_PIX_FMT_ARGB - packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
AV_PIX_FMT_RGBA - packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
AV_PIX_FMT_YUV422P - planar YUV 4:2:2, 16bpp (1 Cr & Cb sample per 2x1 Y samples)
uint64_t flags - Combination of AV_PIX_FMT_FLAG_...
#define AV_PIX_FMT_YUV422P9
int ff_filter_get_nb_threads(AVFilterContext *ctx) - Get number of threads for current filter instance.
#define AV_TIME_BASE - Internal time base represented as integer.
static int filter_slice_rgb(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
AV_PIX_FMT_YUVJ420P - planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
#define AV_PIX_FMT_YUVA444P12
static int filter_slice_luma16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
AV_PIX_FMT_BGR24 - packed RGB 8:8:8, 24bpp, BGRBGR...
#define AV_PIX_FMT_YUVA444P10
static const AVFilterPad avfilter_vf_fade_inputs[]
#define AV_PIX_FMT_YUV444P9
static const AVFilterPad outputs[]
int format - agreed upon media format
int ff_fill_rgba_map(uint8_t *rgba_map, enum AVPixelFormat pix_fmt)
static int filter_slice_alpha16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
#define AV_PIX_FMT_YUV420P16
int (*filter_slice_alpha)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
#define AV_PIX_FMT_YUV420P14
#define AV_TIME_BASE_Q - Internal time base represented as fractional value.
int linesize[AV_NUM_DATA_POINTERS] - For video, size in bytes of each picture line.
AV_PIX_FMT_YUVA444P - planar YUV 4:4:4, 32bpp (1 Cr & Cb sample per 1x1 Y & A samples)
AVPixFmtDescriptor - Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
#define AV_PIX_FMT_YUV420P10
AV_PIX_FMT_YUV410P - planar YUV 4:1:0, 9bpp (1 Cr & Cb sample per 4x4 Y samples)
AVClass - Describe the class of an AVClass context structure.
const char *name - Filter name.
#define AV_PIX_FMT_YUV440P12
#define AV_PIX_FMT_YUV420P9
static enum AVPixelFormat pix_fmts_rgb[3]
AVFilterLink **outputs - array of pointers to output links
static enum AVPixelFormat pix_fmts[]
#define AV_PIX_FMT_YUV422P14
#define flags(name, subs, ...)
AVFilterInternal *internal - An opaque struct for libavfilter internal use.
#define AV_PIX_FMT_YUV422P10
#define AV_PIX_FMT_YUV444P12
uint8_t *data[AV_NUM_DATA_POINTERS] - pointer to the picture/channel planes.
uint8_t color_rgba[4] - fade color
AV_PIX_FMT_YUV420P - planar YUV 4:2:0, 12bpp (1 Cr & Cb sample per 2x2 Y samples)
common.h - common internal and external API header
AV_PIX_FMT_GBRAP - planar GBRA 4:4:4:4, 32bpp
#define AV_PIX_FMT_YUVA444P9
AV_PIX_FMT_YUVJ444P - planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
AV_PIX_FMT_YUV411P - planar YUV 4:1:1, 12bpp (1 Cr & Cb sample per 4x1 Y samples)
avfilter_execute_func *execute
static av_always_inline void filter_rgb_planar(FadeContext *s, const AVFrame *frame, int slice_start, int slice_end, int do_alpha)
static int config_input(AVFilterLink *inlink)
AVFilterContext *dst - dest filter
unsigned int black_level_scaled
enum FadeContext::fade_state (anonymous enum)
AV_PIX_FMT_YUV440P - planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
int depth - Number of bits in the component.
static const enum AVPixelFormat studio_level_pix_fmts[]
enum AVPixelFormat - Pixel format.
#define AV_PIX_FMT_FLAG_PLANAR - At least one pixel component is not in the first data plane.
#define AV_PIX_FMT_YUV422P16
#define AV_PIX_FMT_YUVA422P12
int (*filter_slice_luma)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
eval.h - simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
AVFILTER_DEFINE_CLASS(fade)