Excerpts from the example source (gaps are marked with /* ... */):

#define _XOPEN_SOURCE 600
/* ... */
    if (!outputs || !inputs || !filter_graph) {
/* ... */
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
/* ... */
             time_base.num, time_base.den,
/* ... */
                                       args, NULL, filter_graph);
/* ... */
                                        &inputs, &outputs, NULL)) < 0)
/* ... */
        if (delay > 0 && delay < 1000000)
/* ... */
    for (y = 0; y < frame->height; y++) {
/* ... */
        for (x = 0; x < frame->width; x++)
            putchar(" .-+#"[*(p++) / 52]);
/* ... */
int main(int argc, char **argv)
/* ... */
        fprintf(stderr, "Usage: %s file\n", argv[0]);
/* ... */
    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
/* ... */
    } else if (ret < 0) {
/* ... */
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
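The fragments above come from the example's display path: each filtered GRAY8 frame is printed as ASCII art, and output is paced by sleeping until the frame's presentation time. Below is a minimal sketch of such a routine; it assumes the frame was pulled from a buffersink constrained to AV_PIX_FMT_GRAY8, and show_frame/last_pts are illustrative names rather than the example's exact code.

#define _XOPEN_SOURCE 600       /* for usleep() on some systems */
#include <stdio.h>
#include <unistd.h>
#include <libavutil/avutil.h>        /* AV_TIME_BASE_Q, AV_NOPTS_VALUE */
#include <libavutil/frame.h>
#include <libavutil/mathematics.h>   /* av_rescale_q() */

/* Sketch: print a GRAY8 frame as ASCII and pace output by its pts,
 * which is expressed in the buffersink's time_base. Illustrative only. */
static void show_frame(const AVFrame *frame, AVRational time_base)
{
    static int64_t last_pts = AV_NOPTS_VALUE;

    if (frame->pts != AV_NOPTS_VALUE) {
        if (last_pts != AV_NOPTS_VALUE) {
            /* Convert the pts difference into microseconds and sleep. */
            int64_t delay = av_rescale_q(frame->pts - last_pts,
                                         time_base, AV_TIME_BASE_Q);
            if (delay > 0 && delay < 1000000)
                usleep(delay);
        }
        last_pts = frame->pts;
    }

    /* Map each luma byte (0..255) to one of five ASCII shades. */
    const uint8_t *p0 = frame->data[0];
    for (int y = 0; y < frame->height; y++) {
        const uint8_t *p = p0;
        for (int x = 0; x < frame->width; x++)
            putchar(" .-+#"[*(p++) / 52]);
        putchar('\n');
        p0 += frame->linesize[0];
    }
    fflush(stdout);
}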
AVFrame
This structure describes decoded (raw) audio or video data.
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
buffersrc.h
Memory buffer source API.
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
struct AVFilterInOut * next
next input/output in the list, NULL if this is the last
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
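In the example this macro is what pins the buffersink's output to a single pixel format so the frames can be rendered as text. A minimal sketch, assuming buffersink_ctx was already created with avfilter_graph_create_filter (set_sink_formats is an illustrative name):

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>
#include <libavutil/pixfmt.h>

/* Restrict the sink to GRAY8 so frames can be printed as ASCII art.
 * AV_PIX_FMT_NONE terminates the list. */
static int set_sink_formats(AVFilterContext *buffersink_ctx)
{
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    return av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                               AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
}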
const char * filter_descr
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown); that is, the width of a pixel divided by the height of the pixel...
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
buffersink.h
Memory buffer sink API for audio and video.
AVFilterLink ** inputs
array of pointers to input links
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
AVStream ** streams
A list of all streams in the file.
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
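The args string seen in the excerpt above is what this call receives when the example creates its "buffer" source: the decoder's geometry, pixel format, time base and pixel aspect ratio are passed as an option string. A hedged sketch of that setup (create_buffer_source is an illustrative name):

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavfilter/avfilter.h>

/* Create the "buffer" source whose parameters describe the decoder output.
 * dec_ctx and time_base come from the opened input stream. */
static int create_buffer_source(AVFilterGraph *filter_graph,
                                AVFilterContext **buffersrc_ctx,
                                const AVCodecContext *dec_ctx,
                                AVRational time_base)
{
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    char args[512];

    if (!buffersrc)
        return AVERROR_FILTER_NOT_FOUND;

    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    return avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in",
                                        args, NULL, filter_graph);
}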
#define AVERROR_EOF
End of file.
int main(int argc, char **argv)
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
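av_rescale_q(a, bq, cq) computes a * bq / cq with 64-bit intermediates, which is how the example turns a pts difference in the stream time base into microseconds for usleep. A tiny illustration with made-up values (not from the example):

#include <libavutil/mathematics.h>

/* Convert a count of milliseconds (time base 1/1000) into 90 kHz ticks:
 * 40 ms -> av_rescale_q(40, {1,1000}, {1,90000}) == 3600. */
static int64_t ms_to_90khz(int64_t ms)
{
    return av_rescale_q(ms, (AVRational){1, 1000}, (AVRational){1, 90000});
}

Doing the conversion on the rational values avoids the rounding drift a floating-point scale factor would introduce.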
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
AVFilterContext * buffersrc_ctx
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g. extended_data.
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
AVRational time_base
Define the time base used by the PTS of the frames/samples which will pass through this link...
static int video_stream_index
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
int width
picture width / height.
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
filter_frame: frames that a filter keeps buffered must be flushed immediately if a new input produces new output. If the input frame is not enough to produce output, the filter must not call request_frame to get more; it must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced.

request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source, it should directly call filter_frame on the corresponding output. For a filter, if there are queued frames already, one of these frames should be pushed; if not, the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. request_frame should return 0 if it could produce a frame, or at least make progress towards producing a frame.
AVFilterContext * filter_ctx
filter context associated to this input/output
static const AVFilterPad outputs[]
A linked-list of the inputs/outputs of the filter chain.
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
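avcodec_send_packet and avcodec_receive_frame form the send/receive decoding loop used by the example: each packet is submitted once and frames are then drained until the decoder asks for more input. A hedged sketch (decode_packet and on_frame are illustrative names; error handling is trimmed):

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

/* Feed one packet to the decoder and drain every frame it produces.
 * on_frame() is a placeholder for whatever consumes the decoded frame. */
static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt,
                         AVFrame *frame, int (*on_frame)(AVFrame *))
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* needs more input, or fully drained */
        if (ret < 0)
            return ret;

        /* The example uses the best-effort timestamp as the frame pts. */
        frame->pts = frame->best_effort_timestamp;
        ret = on_frame(frame);
        av_frame_unref(frame);
    }
    return ret;
}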
char * av_strdup(const char *s)
Duplicate a string.
avcodec.h
Libavcodec external API header.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
AVFilterGraph * filter_graph
AVCodecContext
main external API structure.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
static int open_input_file(const char *filename)
int pad_idx
index of the filt_ctx pad to use for linking
AVRational
Rational number (pair of numerator and denominator).
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
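Together with avformat_open_input, avformat_find_stream_info, av_find_best_stream, avcodec_alloc_context3 and avcodec_parameters_to_context, this call completes the decoder setup that the example's open_input_file performs. A condensed, hedged sketch (open_video_decoder is an illustrative name; cleanup on failure is left to the caller):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Open the file, pick the "best" video stream and open a decoder for it. */
static int open_video_decoder(const char *filename, AVFormatContext **fmt_ctx,
                              AVCodecContext **dec_ctx, int *stream_index)
{
    AVCodec *dec = NULL;
    int ret;

    if ((ret = avformat_open_input(fmt_ctx, filename, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(*fmt_ctx, NULL)) < 0)
        return ret;

    ret = av_find_best_stream(*fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (ret < 0)
        return ret;
    *stream_index = ret;

    *dec_ctx = avcodec_alloc_context3(dec);
    if (!*dec_ctx)
        return AVERROR(ENOMEM);

    /* Copy the demuxer's stream parameters into the decoder context. */
    ret = avcodec_parameters_to_context(*dec_ctx,
                                        (*fmt_ctx)->streams[*stream_index]->codecpar);
    if (ret < 0)
        return ret;

    return avcodec_open2(*dec_ctx, dec, NULL);
}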
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
static enum AVPixelFormat pix_fmts[]
AVFilterContext * buffersink_ctx
char * name
unique name for this input/output in the list
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static int init_filters(const char *filters_descr)
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
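Before calling avfilter_graph_parse_ptr, the example describes the graph's open endpoints with AVFilterInOut entries: the string's dangling input "in" is tied to the buffer source and its dangling output "out" to the buffersink, after which avfilter_graph_config validates the whole graph. A hedged sketch of that wiring (parse_and_config is an illustrative name):

#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>

/* Parse filters_descr (e.g. "scale=78:24,transpose=cclock") between the
 * already-created buffer source and buffersink, then configure the graph. */
static int parse_and_config(AVFilterGraph *filter_graph, const char *filters_descr,
                            AVFilterContext *buffersrc_ctx, AVFilterContext *buffersink_ctx)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret = AVERROR(ENOMEM);

    if (!outputs || !inputs)
        goto end;

    /* The graph string's open input ("in") is fed by the buffer source... */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    /* ...and its open output ("out") drains into the buffersink. */
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = buffersink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    ret = avfilter_graph_config(filter_graph, NULL);
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}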
static void display_frame(const AVFrame *frame, AVRational time_base)
AVCodecParameters * codecpar
Codec parameters associated with this stream.
static AVFormatContext * fmt_ctx
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Frame: the word "frame" indicates either a video frame or a group of audio samples, as stored in an AVFrame structure. Format: for each input and each output, the list of supported formats; for video that means pixel format, for audio that means channel layout, sample format and sample rate. These lists are references to shared objects: when the negotiation mechanism computes the intersection of the formats supported at each end of a link, all references to both lists are replaced with a reference to the intersection, and when a single format is eventually chosen for a link amongst the remaining list, all references to the list are updated. That means that if a filter requires that its input and output have the same format amongst a supported list, all it has to do is use a reference to the same list of formats. query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism to try again later; that can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. (Frame references, ownership and permissions are described separately.)
static AVCodecContext * dec_ctx
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
Get a frame with filtered data from sink and put it in frame.
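av_buffersrc_add_frame_flags pushes a decoded frame into the graph and av_buffersink_get_frame pulls filtered frames back out; a single input frame may yield zero or several output frames, and AVERROR(EAGAIN) just means the sink needs more input. A hedged sketch of that push/pull step (filter_one_frame and show are illustrative names):

#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/frame.h>

/* Push one decoded frame into the graph and drain whatever the sink yields.
 * show() stands in for the caller's display routine. */
static int filter_one_frame(AVFilterContext *buffersrc_ctx, AVFilterContext *buffersink_ctx,
                            AVFrame *frame, AVFrame *filt_frame,
                            void (*show)(const AVFrame *))
{
    int ret = av_buffersrc_add_frame_flags(buffersrc_ctx, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* nothing more to pull for now */
        if (ret < 0)
            return ret;
        show(filt_frame);
        av_frame_unref(filt_frame);
    }
}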
AVPixelFormat
Pixel format.
AVPacket
This structure stores compressed data.
#define AV_NOPTS_VALUE
Undefined timestamp value.