FFmpeg
transcoding.c
1 /*
2  * Copyright (c) 2010 Nicolas George
3  * Copyright (c) 2011 Stefano Sabatini
4  * Copyright (c) 2014 Andrey Utkin
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 /**
26  * @file
27  * API example for demuxing, decoding, filtering, encoding and muxing
28  * @example transcoding.c
29  */
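/*
 * Typical invocation (a sketch; the executable name depends on how the
 * examples are built in your tree):
 *
 *     ./transcoding input.mp4 output.mkv
 *
 * The output container is guessed from the output filename by
 * avformat_alloc_output_context2(), and each audio/video stream is
 * re-encoded with an encoder matching the input codec ID.
 */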
30 
31 #include <libavcodec/avcodec.h>
32 #include <libavformat/avformat.h>
33 #include <libavfilter/buffersink.h>
34 #include <libavfilter/buffersrc.h>
35 #include <libavutil/opt.h>
36 #include <libavutil/pixdesc.h>
37 
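/*
 * Global demuxer/muxer contexts and per-stream state: each input stream gets
 * a decoder context plus a reusable decoded frame (StreamContext) and, if it
 * is re-encoded, a filter graph with its endpoints plus a reusable filtered
 * frame and output packet (FilteringContext).
 */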
38 static AVFormatContext *ifmt_ctx;
39 static AVFormatContext *ofmt_ctx;
40 typedef struct FilteringContext {
41  AVFilterContext *buffersink_ctx;
42  AVFilterContext *buffersrc_ctx;
43  AVFilterGraph *filter_graph;
44 
45  AVPacket *enc_pkt;
46  AVFrame *filtered_frame;
47 } FilteringContext;
48 static FilteringContext *filter_ctx;
49 
50 typedef struct StreamContext {
51  AVCodecContext *dec_ctx;
52  AVCodecContext *enc_ctx;
53 
54  AVFrame *dec_frame;
55 } StreamContext;
56 static StreamContext *stream_ctx;
57 
58 static int open_input_file(const char *filename)
59 {
60  int ret;
61  unsigned int i;
62 
63  ifmt_ctx = NULL;
64  if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
65  av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
66  return ret;
67  }
68 
69  if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
70  av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
71  return ret;
72  }
73 
74  stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
75  if (!stream_ctx)
76  return AVERROR(ENOMEM);
77 
78  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
79  AVStream *stream = ifmt_ctx->streams[i];
80  AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
81  AVCodecContext *codec_ctx;
82  if (!dec) {
83  av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
84  return AVERROR_DECODER_NOT_FOUND;
85  }
86  codec_ctx = avcodec_alloc_context3(dec);
87  if (!codec_ctx) {
88  av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
89  return AVERROR(ENOMEM);
90  }
91  ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
92  if (ret < 0) {
93  av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
94  "for stream #%u\n", i);
95  return ret;
96  }
97  /* Reencode video & audio and remux subtitles etc. */
98  if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
99  || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
100  if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
101  codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
102  /* Open decoder */
103  ret = avcodec_open2(codec_ctx, dec, NULL);
104  if (ret < 0) {
105  av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
106  return ret;
107  }
108  }
109  stream_ctx[i].dec_ctx = codec_ctx;
110 
111  stream_ctx[i].dec_frame = av_frame_alloc();
112  if (!stream_ctx[i].dec_frame)
113  return AVERROR(ENOMEM);
114  }
115 
116  av_dump_format(ifmt_ctx, 0, filename, 0);
117  return 0;
118 }
119 
120 static int open_output_file(const char *filename)
121 {
122  AVStream *out_stream;
123  AVStream *in_stream;
124  AVCodecContext *dec_ctx, *enc_ctx;
125  AVCodec *encoder;
126  int ret;
127  unsigned int i;
128 
129  ofmt_ctx = NULL;
130  avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
131  if (!ofmt_ctx) {
132  av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
133  return AVERROR_UNKNOWN;
134  }
135 
136 
137  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
138  out_stream = avformat_new_stream(ofmt_ctx, NULL);
139  if (!out_stream) {
140  av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
141  return AVERROR_UNKNOWN;
142  }
143 
144  in_stream = ifmt_ctx->streams[i];
145  dec_ctx = stream_ctx[i].dec_ctx;
146 
147  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
148  || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
149  /* in this example, we choose transcoding to same codec */
150  encoder = avcodec_find_encoder(dec_ctx->codec_id);
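 /* A hypothetical variation (not part of the original example): to force a
  * specific encoder instead of reusing the input codec ID, one could call
  * e.g. encoder = avcodec_find_encoder_by_name("libx264"); provided that
  * encoder is enabled in the FFmpeg build. */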
151  if (!encoder) {
152  av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
153  return AVERROR_INVALIDDATA;
154  }
155  enc_ctx = avcodec_alloc_context3(encoder);
156  if (!enc_ctx) {
157  av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
158  return AVERROR(ENOMEM);
159  }
160 
161  /* In this example, we transcode to same properties (picture size,
162  * sample rate etc.). These properties can be changed for output
163  * streams easily using filters */
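 /* For instance (a sketch, not in the original example): to downscale the
  * video one would pass a filter_spec such as "scale=1280:-2" to
  * init_filter() below and set enc_ctx->width/height to the filtered
  * dimensions before opening the encoder. */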
164  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
165  enc_ctx->height = dec_ctx->height;
166  enc_ctx->width = dec_ctx->width;
167  enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
168  /* take first format from list of supported formats */
169  if (encoder->pix_fmts)
170  enc_ctx->pix_fmt = encoder->pix_fmts[0];
171  else
172  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
173  /* video time_base can be set to whatever is handy and supported by encoder */
174  enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
175  } else {
176  enc_ctx->sample_rate = dec_ctx->sample_rate;
177  enc_ctx->channel_layout = dec_ctx->channel_layout;
178  enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
179  /* take first format from list of supported formats */
180  enc_ctx->sample_fmt = encoder->sample_fmts[0];
181  enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
182  }
183 
184  if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
185  enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
186 
187  /* Third parameter can be used to pass settings to encoder */
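 /* A minimal sketch (assuming a libx264-style encoder; not part of the
  * original example):
  *
  *     AVDictionary *opts = NULL;
  *     av_dict_set(&opts, "preset", "fast", 0);   // codec-private option
  *     ret = avcodec_open2(enc_ctx, encoder, &opts);
  *     av_dict_free(&opts);
  *
  * Options not recognized by the encoder are left in the dictionary. */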
188  ret = avcodec_open2(enc_ctx, encoder, NULL);
189  if (ret < 0) {
190  av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
191  return ret;
192  }
193  ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
194  if (ret < 0) {
195  av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
196  return ret;
197  }
198 
199  out_stream->time_base = enc_ctx->time_base;
200  stream_ctx[i].enc_ctx = enc_ctx;
201  } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
202  av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
203  return AVERROR_INVALIDDATA;
204  } else {
205  /* if this stream must be remuxed */
206  ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
207  if (ret < 0) {
208  av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
209  return ret;
210  }
211  out_stream->time_base = in_stream->time_base;
212  }
213 
214  }
215  av_dump_format(ofmt_ctx, 0, filename, 1);
216 
217  if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
218  ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
219  if (ret < 0) {
220  av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
221  return ret;
222  }
223  }
224 
225  /* init muxer, write output file header */
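 /* Muxer options could be supplied via an AVDictionary passed as the second
  * parameter of avformat_write_header(), e.g. (container-specific sketch for
  * the mp4/mov muxer, not part of the original example):
  *     av_dict_set(&opts, "movflags", "+faststart", 0); */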
226  ret = avformat_write_header(ofmt_ctx, NULL);
227  if (ret < 0) {
228  av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
229  return ret;
230  }
231 
232  return 0;
233 }
234 
235 static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
236  AVCodecContext *enc_ctx, const char *filter_spec)
237 {
238  char args[512];
239  int ret = 0;
240  const AVFilter *buffersrc = NULL;
241  const AVFilter *buffersink = NULL;
242  AVFilterContext *buffersrc_ctx = NULL;
243  AVFilterContext *buffersink_ctx = NULL;
244  AVFilterInOut *outputs = avfilter_inout_alloc();
245  AVFilterInOut *inputs = avfilter_inout_alloc();
246  AVFilterGraph *filter_graph = avfilter_graph_alloc();
247 
248  if (!outputs || !inputs || !filter_graph) {
249  ret = AVERROR(ENOMEM);
250  goto end;
251  }
252 
253  if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
254  buffersrc = avfilter_get_by_name("buffer");
255  buffersink = avfilter_get_by_name("buffersink");
256  if (!buffersrc || !buffersink) {
257  av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
258  ret = AVERROR_UNKNOWN;
259  goto end;
260  }
261 
262  snprintf(args, sizeof(args),
263  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
264  dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
265  dec_ctx->time_base.num, dec_ctx->time_base.den,
266  dec_ctx->sample_aspect_ratio.num,
267  dec_ctx->sample_aspect_ratio.den);
268 
269  ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
270  args, NULL, filter_graph);
271  if (ret < 0) {
272  av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
273  goto end;
274  }
275 
276  ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
277  NULL, NULL, filter_graph);
278  if (ret < 0) {
279  av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
280  goto end;
281  }
282 
283  ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
284  (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
285  AV_OPT_SEARCH_CHILDREN);
286  if (ret < 0) {
287  av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
288  goto end;
289  }
290  } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
291  buffersrc = avfilter_get_by_name("abuffer");
292  buffersink = avfilter_get_by_name("abuffersink");
293  if (!buffersrc || !buffersink) {
294  av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
295  ret = AVERROR_UNKNOWN;
296  goto end;
297  }
298 
299  if (!dec_ctx->channel_layout)
300  dec_ctx->channel_layout =
301  av_get_default_channel_layout(dec_ctx->channels);
302  snprintf(args, sizeof(args),
303  "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
304  dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
305  av_get_sample_fmt_name(dec_ctx->sample_fmt),
306  dec_ctx->channel_layout);
307  ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
308  args, NULL, filter_graph);
309  if (ret < 0) {
310  av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
311  goto end;
312  }
313 
314  ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
315  NULL, NULL, filter_graph);
316  if (ret < 0) {
317  av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
318  goto end;
319  }
320 
321  ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
322  (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
323  AV_OPT_SEARCH_CHILDREN);
324  if (ret < 0) {
325  av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
326  goto end;
327  }
328 
329  ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
330  (uint8_t*)&enc_ctx->channel_layout,
331  sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
332  if (ret < 0) {
333  av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
334  goto end;
335  }
336 
337  ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
338  (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
339  AV_OPT_SEARCH_CHILDREN);
340  if (ret < 0) {
341  av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
342  goto end;
343  }
344  } else {
345  ret = AVERROR_UNKNOWN;
346  goto end;
347  }
348 
349  /* Endpoints for the filter graph. */
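 /* The buffer source is linked to the parsed graph under the name "in" and
  * the buffer sink under the name "out"; a filter_spec such as "null" or
  * "anull" therefore connects straight from source to sink. */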
350  outputs->name = av_strdup("in");
351  outputs->filter_ctx = buffersrc_ctx;
352  outputs->pad_idx = 0;
353  outputs->next = NULL;
354 
355  inputs->name = av_strdup("out");
356  inputs->filter_ctx = buffersink_ctx;
357  inputs->pad_idx = 0;
358  inputs->next = NULL;
359 
360  if (!outputs->name || !inputs->name) {
361  ret = AVERROR(ENOMEM);
362  goto end;
363  }
364 
365  if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
366  &inputs, &outputs, NULL)) < 0)
367  goto end;
368 
369  if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
370  goto end;
371 
372  /* Fill FilteringContext */
373  fctx->buffersrc_ctx = buffersrc_ctx;
374  fctx->buffersink_ctx = buffersink_ctx;
375  fctx->filter_graph = filter_graph;
376 
377 end:
378  avfilter_inout_free(&inputs);
379  avfilter_inout_free(&outputs);
380 
381  return ret;
382 }
383 
384 static int init_filters(void)
385 {
386  const char *filter_spec;
387  unsigned int i;
388  int ret;
389  filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
390  if (!filter_ctx)
391  return AVERROR(ENOMEM);
392 
393  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
394  filter_ctx[i].buffersrc_ctx = NULL;
395  filter_ctx[i].buffersink_ctx = NULL;
396  filter_ctx[i].filter_graph = NULL;
397  if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
398  || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
399  continue;
400 
401 
402  if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
403  filter_spec = "null"; /* passthrough (dummy) filter for video */
404  else
405  filter_spec = "anull"; /* passthrough (dummy) filter for audio */
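 /* Any libavfilter graph string could be used here instead of the
  * passthrough filters, e.g. "scale=640:-2" for video or "atempo=2.0" for
  * audio (hypothetical substitutions, not part of the original example);
  * the encoder parameters set in open_output_file() must then agree with
  * the filtered output. */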
406  ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
407  stream_ctx[i].enc_ctx, filter_spec);
408  if (ret)
409  return ret;
410 
411  filter_ctx[i].enc_pkt = av_packet_alloc();
412  if (!filter_ctx[i].enc_pkt)
413  return AVERROR(ENOMEM);
414 
415  filter_ctx[i].filtered_frame = av_frame_alloc();
416  if (!filter_ctx[i].filtered_frame)
417  return AVERROR(ENOMEM);
418  }
419  return 0;
420 }
421 
422 static int encode_write_frame(unsigned int stream_index, int flush)
423 {
424  StreamContext *stream = &stream_ctx[stream_index];
425  FilteringContext *filter = &filter_ctx[stream_index];
426  AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
427  AVPacket *enc_pkt = filter->enc_pkt;
428  int ret;
429 
430  av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
431  /* encode filtered frame */
432  av_packet_unref(enc_pkt);
433 
434  ret = avcodec_send_frame(stream->enc_ctx, filt_frame);
435 
436  if (ret < 0)
437  return ret;
438 
439  while (ret >= 0) {
440  ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);
441 
442  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
443  return 0;
444 
445  /* prepare packet for muxing */
446  enc_pkt->stream_index = stream_index;
447  av_packet_rescale_ts(enc_pkt,
448  stream->enc_ctx->time_base,
449  ofmt_ctx->streams[stream_index]->time_base);
450 
451  av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
452  /* mux encoded frame */
453  ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
454  }
455 
456  return ret;
457 }
458 
459 static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
460 {
461  FilteringContext *filter = &filter_ctx[stream_index];
462  int ret;
463 
464  av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
465  /* push the decoded frame into the filtergraph */
466  ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
467  frame, 0);
468  if (ret < 0) {
469  av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
470  return ret;
471  }
472 
473  /* pull filtered frames from the filtergraph */
474  while (1) {
475  av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
476  ret = av_buffersink_get_frame(filter->buffersink_ctx,
477  filter->filtered_frame);
478  if (ret < 0) {
479  /* if no more frames for output - returns AVERROR(EAGAIN)
480  * if flushed and no more frames for output - returns AVERROR_EOF
481  * rewrite retcode to 0 to show it as normal procedure completion
482  */
483  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
484  ret = 0;
485  break;
486  }
487 
488  filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
489  ret = encode_write_frame(stream_index, 0);
490  av_frame_unref(filter->filtered_frame);
491  if (ret < 0)
492  break;
493  }
494 
495  return ret;
496 }
497 
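/*
 * Encoders that buffer frames internally advertise AV_CODEC_CAP_DELAY and
 * must be drained by sending a NULL frame; encoders without that capability
 * have nothing left to emit, so flushing is skipped for them.
 */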
498 static int flush_encoder(unsigned int stream_index)
499 {
500  if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
501  AV_CODEC_CAP_DELAY))
502  return 0;
503 
504  av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
505  return encode_write_frame(stream_index, 1);
506 }
507 
508 int main(int argc, char **argv)
509 {
510  int ret;
511  AVPacket *packet = NULL;
512  unsigned int stream_index;
513  unsigned int i;
514 
515  if (argc != 3) {
516  av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
517  return 1;
518  }
519 
520  if ((ret = open_input_file(argv[1])) < 0)
521  goto end;
522  if ((ret = open_output_file(argv[2])) < 0)
523  goto end;
524  if ((ret = init_filters()) < 0)
525  goto end;
526  if (!(packet = av_packet_alloc()))
527  goto end;
528 
529  /* read all packets */
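 /* Main loop: demux one packet, then either decode/filter/encode it (streams
  * with a filter graph) or rescale its timestamps and remux it as-is. */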
530  while (1) {
531  if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
532  break;
533  stream_index = packet->stream_index;
534  av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
535  stream_index);
536 
537  if (filter_ctx[stream_index].filter_graph) {
538  StreamContext *stream = &stream_ctx[stream_index];
539 
540  av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
541 
542  av_packet_rescale_ts(packet,
543  ifmt_ctx->streams[stream_index]->time_base,
544  stream->dec_ctx->time_base);
545  ret = avcodec_send_packet(stream->dec_ctx, packet);
546  if (ret < 0) {
547  av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
548  break;
549  }
550 
551  while (ret >= 0) {
552  ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
553  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
554  break;
555  else if (ret < 0)
556  goto end;
557 
558  stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
559  ret = filter_encode_write_frame(stream->dec_frame, stream_index);
560  if (ret < 0)
561  goto end;
562  }
563  } else {
564  /* remux this frame without reencoding */
565  av_packet_rescale_ts(packet,
566  ifmt_ctx->streams[stream_index]->time_base,
567  ofmt_ctx->streams[stream_index]->time_base);
568 
569  ret = av_interleaved_write_frame(ofmt_ctx, packet);
570  if (ret < 0)
571  goto end;
572  }
573  av_packet_unref(packet);
574  }
575 
576  /* flush filters and encoders */
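 /* Passing a NULL frame to filter_encode_write_frame() signals EOF on the
  * buffer source so the filter graph drains, and flush_encoder() then sends
  * a NULL frame to the encoder to put it into draining mode. */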
577  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
578  /* flush filter */
579  if (!filter_ctx[i].filter_graph)
580  continue;
581  ret = filter_encode_write_frame(NULL, i);
582  if (ret < 0) {
583  av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
584  goto end;
585  }
586 
587  /* flush encoder */
588  ret = flush_encoder(i);
589  if (ret < 0) {
590  av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
591  goto end;
592  }
593  }
594 
595  av_write_trailer(ofmt_ctx);
596 end:
597  av_packet_free(&packet);
598  for (i = 0; i < ifmt_ctx->nb_streams; i++) {
599  avcodec_free_context(&stream_ctx[i].dec_ctx);
600  if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
601  avcodec_free_context(&stream_ctx[i].enc_ctx);
602  if (filter_ctx && filter_ctx[i].filter_graph) {
603  avfilter_graph_free(&filter_ctx[i].filter_graph);
604  av_packet_free(&filter_ctx[i].enc_pkt);
605  av_frame_free(&filter_ctx[i].filtered_frame);
606  }
607 
608  av_frame_free(&stream_ctx[i].dec_frame);
609  }
610  av_free(filter_ctx);
611  av_free(stream_ctx);
612  avformat_close_input(&ifmt_ctx);
613  if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
614  avio_closep(&ofmt_ctx->pb);
615  avformat_free_context(ofmt_ctx);
616 
617  if (ret < 0)
618  av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
619 
620  return ret ? 1 : 0;
621 }