#define FF_INTERNAL_FIELDS 1

#define NB_ITEMS(list) (list ## _size / sizeof(*list))

    /* cleanup_redundant_layouts(): drop layouts whose channel count is already listed */
    for (i = 0; i < nb_counts; i++)
        ...
    for (i = lc = 0; i < nb_layouts; i++) {
        ...
        if (n < 64 && (counts & ((uint64_t)1 << n)))
            av_log(ctx, AV_LOG_WARNING,
                   "Removing channel layout 0x%"PRIx64
                   ", redundant with %d channels\n", ...);

    av_log(ctx, AV_LOG_WARNING,
           "%d buffers queued in %s, something may be wrong.\n", ...);

#define MAKE_AVFILTERLINK_ACCESSOR(type, field) \
type av_buffersink_get_##field(const AVFilterContext *ctx) { \
    av_assert0(ctx->filter->activate == activate); \
    return ctx->inputs[0]->field; \
}

#define CHECK_LIST_SIZE(field) \
    if (buf->field ## _size % sizeof(*buf->field)) { \
        av_log(ctx, AV_LOG_ERROR, "Invalid size for " #field ": %d, " \
               "should be multiple of %d\n", \
               buf->field ## _size, (int)sizeof(*buf->field)); \
        return AVERROR(EINVAL); \
    }

    av_log(ctx, AV_LOG_ERROR,
           "Conflicting all_channel_counts and list in options\n");
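For orientation, this is roughly what one expansion of MAKE_AVFILTERLINK_ACCESSOR above produces; the frame_rate field is used as an example here, and the file instantiates the macro once per exported AVFilterLink property.

AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
{
    /* Only valid on a buffersink/abuffersink instance: the assert checks that
     * the filter's activate callback is this file's activate(). */
    av_assert0(ctx->filter->activate == activate);
    /* A sink has exactly one input; the property is read off that link. */
    return ctx->inputs[0]->frame_rate;
}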
#define OFFSET(x) offsetof(BufferSinkContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
...
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

AVFilter ff_vsink_buffer = {
    .name        = "buffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them available to the end of the filter graph."),
    .priv_class  = &buffersink_class,
    ...
    .inputs      = avfilter_vsink_buffer_inputs,
    ...
};
AVFilter ff_asink_abuffer = {
    .name        = "abuffersink",
    .description = NULL_IF_CONFIG_SMALL("Buffer audio frames, and make them available to the end of the filter graph."),
    .priv_class  = &abuffersink_class,
    ...
    .inputs      = avfilter_asink_abuffer_inputs,
    ...
};
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
Take a frame from the link's FIFO and update the link's stats.
int frame_wanted_out
True if a frame is currently wanted on the output of this filter.
enum AVSampleFormat * sample_fmts
list of accepted sample formats, terminated by AV_SAMPLE_FMT_NONE
This structure describes decoded (raw) audio or video data.
#define AV_LOG_WARNING
Something somehow does not look correct.
Main libavfilter public API header.
static av_cold int init(AVCodecContext *avctx)
int max_samples
Maximum number of samples to filter at once.
int * sample_rates
list of accepted sample rates, terminated by -1
static const AVOption abuffersink_options[]
int * channel_counts
list of accepted channel counts, terminated by -1
static int return_or_keep_frame(BufferSinkContext *buf, AVFrame *out, AVFrame *in, int flags)
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
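As a small illustration (a sketch, not code from this file), the difference between moving and re-referencing is what return_or_keep_frame()-style code turns on; the names out, in and flags are hypothetical, AV_BUFFERSINK_FLAG_PEEK is the flag documented below.

int ret;

if (flags & AV_BUFFERSINK_FLAG_PEEK) {
    /* Keep the frame in the sink: give the caller an extra reference.
     * av_frame_ref() can fail, e.g. on allocation error. */
    ret = av_frame_ref(out, in);
    if (ret < 0)
        return ret;
} else {
    /* Hand the frame over: ownership moves to "out" and "in" is reset,
     * so only "out" has to be unreferenced afterwards. */
    av_frame_move_ref(out, in);
}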
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU).
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video, that means pixel format. For audio, that means channel layout, sample format (the sample packing is implied by the sample format) and sample rate. The lists are not just lists; they are references to shared objects.
void ff_inlink_request_frame(AVFilterLink *link)
Mark that a frame is wanted on the link.
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
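For example, AV_CH_LAYOUT_5POINT1 (FL+FR+FC+LFE+BL+BR) yields 6; cleanup_redundant_layouts() above uses this count to drop layouts that are redundant with an entry in channel_counts.

int n = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_5POINT1);   /* n == 6 */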
struct AVFilterGraph * graph
filtergraph this filter belongs to
memory buffer sink API for audio and video
const char * name
Pad name.
int ff_filter_graph_run_once(AVFilterGraph *graph)
Run one round of processing on a filter graph.
AVFilterLink ** inputs
array of pointers to input links
char * name
name of this filter instance
AVBufferSinkParams * av_buffersink_params_alloc(void)
Create an AVBufferSinkParams structure.
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static const AVOption buffersink_options[]
#define CHECK_LIST_SIZE(field)
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
A filter pad used for either input or output.
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
AVFILTER_DEFINE_CLASS(buffersink)
A link between two filters.
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
Test and acknowledge the change of status on the link.
static int vsink_query_formats(AVFilterContext *ctx)
AVABufferSinkParams * av_abuffersink_params_alloc(void)
Create an AVABufferSinkParams structure.
int min_samples
Minimum number of samples to filter at once.
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
void * priv
private data for use by the filter
static int activate(AVFilterContext *ctx)
simple assert() macros that are a bit more flexible than ISO C assert().
static const AVFilterPad avfilter_asink_abuffer_inputs[]
static void cleanup_redundant_layouts(AVFilterContext *ctx)
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
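A minimal consumer loop, as a sketch; sink_ctx stands for an already configured buffersink instance and is not a name from this file.

AVFrame *frame = av_frame_alloc();
int ret = frame ? 0 : AVERROR(ENOMEM);

while (ret >= 0) {
    /* AV_BUFFERSINK_FLAG_NO_REQUEST: only return frames already buffered in the
     * sink; do not ask the rest of the graph to produce more. */
    ret = av_buffersink_get_frame_flags(sink_ctx, frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
    if (ret >= 0) {
        /* ... consume the frame ... */
        av_frame_unref(frame);
    }
}
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
    ret = 0;    /* nothing buffered right now, or end of stream: not an error */
av_frame_free(&frame);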
common internal API header
int64_t * channel_layouts
list of accepted channel layouts, terminated by -1
audio channel layout utility functions
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
Same as av_buffersink_get_frame(), but with the ability to specify the number of samples read. This function is less efficient than av_buffersink_get_frame(), because it copies the data around.
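A sketch of reading audio in fixed-size chunks; sink_ctx again stands for a configured abuffersink instance, and 1024 is an arbitrary example size.

AVFrame *frame = av_frame_alloc();
int ret;

if (!frame)
    return AVERROR(ENOMEM);
/* Each successful call fills "frame" with exactly 1024 samples; only the final
 * frame before EOF may carry fewer. */
while ((ret = av_buffersink_get_samples(sink_ctx, frame, 1024)) >= 0) {
    /* ... consume frame->nb_samples samples ... */
    av_frame_unref(frame);
}
av_frame_free(&frame);
/* ret is AVERROR_EOF at the end of the stream, or another AVERROR code on failure. */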
These buffered frames must be flushed immediately if a new input produces new output. When a frame arrives, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, it must be ready for frames arriving randomly on any of them, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced.

request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; otherwise, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. The filter should push at least one frame, or at least make progress towards producing a frame.
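To make the scheduling rules above concrete, here is a rough skeleton of an activate() callback for a single-input sink, built only from the ff_inlink_* helpers referenced on this page; it is a sketch under current internal-API assumptions, not the actual buffersink implementation.

static int my_sink_activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *frame;
    int64_t pts;
    int ret, status;

    /* 1. If a frame is already queued on the input, take it and process it;
     *    do not ask upstream for more in the same invocation. */
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0) {
        /* ... hand the frame to the application or queue it ... */
        av_frame_free(&frame);
        return 0;
    }

    /* 2. Otherwise, acknowledge a queued EOF/error status if there is one. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts))
        return 0;

    /* 3. Nothing queued: mark that a frame is wanted on the input. */
    ff_inlink_request_frame(inlink);
    return 0;
}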
int partial_buf_size
Size of the partial buffer to allocate.
enum AVPixelFormat * pixel_fmts
list of accepted pixel formats, must be terminated with -1
static const AVFilterPad outputs[]
offset must point to a pointer immediately followed by an int for the length
AVFilter ff_asink_abuffer
A list of supported channel layouts.
AVSampleFormat
Audio sample formats.
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max, AVFrame **rframe)
Take samples from the link's FIFO and update the link's stats.
Describe the class of an AVClass context structure.
Rational number (pair of numerator and denominator).
They must not be accessed directly. The fifo field contains the frames that are queued in the input for processing by the filter. The status_in and status_out fields contain the queued status (EOF or error) of the link.
const char * name
Filter name.
enum MovChannelLayoutTag * layouts
#define flags(name, subs,...)
The exact code depends on how similar the blocks are and how related they are to the rest of the filter, and it needs to apply these operations to the correct inlink or outlink if there are several. Macros are available to factor that when no extra processing is needed on the inlink.
static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
A reference to a data buffer.
static int query_formats(AVFilterContext *ctx)
static size_t ff_framequeue_queued_frames(const FFFrameQueue *fq)
Get the number of queued frames.
common internal and external API header
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
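Typical use, as a sketch; sink_ctx is a configured abuffersink instance and 1024 an arbitrary example size.

/* After this call, frames returned by av_buffersink_get_frame() carry exactly
 * 1024 samples each, except possibly the final frame before EOF. */
av_buffersink_set_frame_size(sink_ctx, 1024);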
static av_cold int common_init(AVFilterContext *ctx)
#define AV_BUFFERSINK_FLAG_PEEK
Tell av_buffersink_get_buffer_ref() to read video/samples buffer reference, but not remove it from the buffer.
static int asink_query_formats(AVFilterContext *ctx)
Filter: the word “frame” indicates either a video frame or a group of audio samples.
static const AVFilterPad avfilter_vsink_buffer_inputs[]
Filter: the word “frame” indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats. For video, that means pixel format; for audio, that means channel layout, sample format and sample rate. The lists are not just lists; they are references to shared objects. When the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection. And when a single format is eventually chosen for a link amongst the remaining ones, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions.
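As an illustration of the shared-list mechanism, a minimal query_formats sketch; ff_make_format_list() and ff_set_common_formats() are libavfilter internal helpers not otherwise shown on this page, and the function and format choices are arbitrary.

static int my_query_formats(AVFilterContext *ctx)
{
    /* One list, referenced from every input and output of the filter: because
     * the references are shared, negotiation forces the same format on all of
     * them. */
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts = ff_make_format_list(pix_fmts);

    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts);
}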
#define MAKE_AVFILTERLINK_ACCESSOR(type, field)
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
Get a frame with filtered data from sink and put it in frame.
AVPixelFormat
Pixel format.
const AVFilter * filter
the AVFilter of which this is an instance