FFmpeg
muxing.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a copy
5  * of this software and associated documentation files (the "Software"), to deal
6  * in the Software without restriction, including without limitation the rights
7  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8  * copies of the Software, and to permit persons to whom the Software is
9  * furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20  * THE SOFTWARE.
21  */
22 
23 /**
24  * @file
25  * libavformat API example.
26  *
27  * Output a media file in any supported libavformat format. The default
28  * codecs are used.
29  * @example muxing.c
30  */
31 
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
46 
47 #define STREAM_DURATION 10.0
48 #define STREAM_FRAME_RATE 25 /* 25 images/s */
49 #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
50 
51 #define SCALE_FLAGS SWS_BICUBIC
52 
53 // a wrapper around a single output AVStream
54 typedef struct OutputStream {
57 
58  /* pts of the next frame that will be generated */
59  int64_t next_pts;
61 
64 
66 
67  float t, tincr, tincr2;
68 
71 } OutputStream;
72 
73 static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
74 {
76 
77  printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
78  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
79  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
81  pkt->stream_index);
82 }
83 
86 {
87  int ret;
88 
89  // send the frame to the encoder
91  if (ret < 0) {
92  fprintf(stderr, "Error sending a frame to the encoder: %s\n",
93  av_err2str(ret));
94  exit(1);
95  }
96 
97  while (ret >= 0) {
99  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
100  break;
101  else if (ret < 0) {
102  fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
103  exit(1);
104  }
105 
106  /* rescale output packet timestamp values from codec to stream timebase */
107  av_packet_rescale_ts(pkt, c->time_base, st->time_base);
108  pkt->stream_index = st->index;
109 
110  /* Write the compressed frame to the media file. */
113  /* pkt is now blank (av_interleaved_write_frame() takes ownership of
114  * its contents and resets pkt), so that no unreferencing is necessary.
115  * This would be different if one used av_write_frame(). */
116  if (ret < 0) {
117  fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
118  exit(1);
119  }
120  }
121 
122  return ret == AVERROR_EOF ? 1 : 0;
123 }
124 
125 /* Add an output stream. */
127  const AVCodec **codec,
128  enum AVCodecID codec_id)
129 {
130  AVCodecContext *c;
131  int i;
132 
133  /* find the encoder */
134  *codec = avcodec_find_encoder(codec_id);
135  if (!(*codec)) {
136  fprintf(stderr, "Could not find encoder for '%s'\n",
138  exit(1);
139  }
140 
142  if (!ost->tmp_pkt) {
143  fprintf(stderr, "Could not allocate AVPacket\n");
144  exit(1);
145  }
146 
147  ost->st = avformat_new_stream(oc, NULL);
148  if (!ost->st) {
149  fprintf(stderr, "Could not allocate stream\n");
150  exit(1);
151  }
152  ost->st->id = oc->nb_streams-1;
153  c = avcodec_alloc_context3(*codec);
154  if (!c) {
155  fprintf(stderr, "Could not alloc an encoding context\n");
156  exit(1);
157  }
158  ost->enc = c;
159 
160  switch ((*codec)->type) {
161  case AVMEDIA_TYPE_AUDIO:
162  c->sample_fmt = (*codec)->sample_fmts ?
163  (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
164  c->bit_rate = 64000;
165  c->sample_rate = 44100;
166  if ((*codec)->supported_samplerates) {
167  c->sample_rate = (*codec)->supported_samplerates[0];
168  for (i = 0; (*codec)->supported_samplerates[i]; i++) {
169  if ((*codec)->supported_samplerates[i] == 44100)
170  c->sample_rate = 44100;
171  }
172  }
173  c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
174  c->channel_layout = AV_CH_LAYOUT_STEREO;
175  if ((*codec)->channel_layouts) {
176  c->channel_layout = (*codec)->channel_layouts[0];
177  for (i = 0; (*codec)->channel_layouts[i]; i++) {
178  if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
179  c->channel_layout = AV_CH_LAYOUT_STEREO;
180  }
181  }
182  c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
183  ost->st->time_base = (AVRational){ 1, c->sample_rate };
184  break;
185 
186  case AVMEDIA_TYPE_VIDEO:
187  c->codec_id = codec_id;
188 
189  c->bit_rate = 400000;
190  /* Resolution must be a multiple of two. */
191  c->width = 352;
192  c->height = 288;
193  /* timebase: This is the fundamental unit of time (in seconds) in terms
194  * of which frame timestamps are represented. For fixed-fps content,
195  * timebase should be 1/framerate and timestamp increments should be
196  * identical to 1. */
198  c->time_base = ost->st->time_base;
199 
200  c->gop_size = 12; /* emit one intra frame every twelve frames at most */
201  c->pix_fmt = STREAM_PIX_FMT;
202  if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
203  /* just for testing, we also add B-frames */
204  c->max_b_frames = 2;
205  }
206  if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
207  /* Needed to avoid using macroblocks in which some coeffs overflow.
208  * This does not happen with normal video, it just happens here as
209  * the motion of the chroma plane does not match the luma plane. */
210  c->mb_decision = 2;
211  }
212  break;
213 
214  default:
215  break;
216  }
217 
218  /* Some formats want stream headers to be separate. */
219  if (oc->oformat->flags & AVFMT_GLOBALHEADER)
220  c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
221 }
222 
223 /**************************************************************/
224 /* audio output */
225 
226 static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
227  uint64_t channel_layout,
228  int sample_rate, int nb_samples)
229 {
231  int ret;
232 
233  if (!frame) {
234  fprintf(stderr, "Error allocating an audio frame\n");
235  exit(1);
236  }
237 
238  frame->format = sample_fmt;
239  frame->channel_layout = channel_layout;
240  frame->sample_rate = sample_rate;
241  frame->nb_samples = nb_samples;
242 
243  if (nb_samples) {
245  if (ret < 0) {
246  fprintf(stderr, "Error allocating an audio buffer\n");
247  exit(1);
248  }
249  }
250 
251  return frame;
252 }
253 
254 static void open_audio(AVFormatContext *oc, const AVCodec *codec,
255  OutputStream *ost, AVDictionary *opt_arg)
256 {
257  AVCodecContext *c;
258  int nb_samples;
259  int ret;
260  AVDictionary *opt = NULL;
261 
262  c = ost->enc;
263 
264  /* open it */
265  av_dict_copy(&opt, opt_arg, 0);
266  ret = avcodec_open2(c, codec, &opt);
267  av_dict_free(&opt);
268  if (ret < 0) {
269  fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
270  exit(1);
271  }
272 
273  /* init signal generator */
274  ost->t = 0;
275  ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
276  /* increment frequency by 110 Hz per second */
277  ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
278 
279  if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
280  nb_samples = 10000;
281  else
282  nb_samples = c->frame_size;
283 
284  ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
285  c->sample_rate, nb_samples);
286  ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
287  c->sample_rate, nb_samples);
288 
289  /* copy the stream parameters to the muxer */
291  if (ret < 0) {
292  fprintf(stderr, "Could not copy the stream parameters\n");
293  exit(1);
294  }
295 
296  /* create resampler context */
297  ost->swr_ctx = swr_alloc();
298  if (!ost->swr_ctx) {
299  fprintf(stderr, "Could not allocate resampler context\n");
300  exit(1);
301  }
302 
303  /* set options */
304  av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
305  av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
306  av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
307  av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
308  av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
309  av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
310 
311  /* initialize the resampling context */
312  if ((ret = swr_init(ost->swr_ctx)) < 0) {
313  fprintf(stderr, "Failed to initialize the resampling context\n");
314  exit(1);
315  }
316 }
317 
318 /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
319  * 'nb_channels' channels. */
321 {
323  int j, i, v;
324  int16_t *q = (int16_t*)frame->data[0];
325 
326  /* check if we want to generate more frames */
328  STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
329  return NULL;
330 
331  for (j = 0; j <frame->nb_samples; j++) {
332  v = (int)(sin(ost->t) * 10000);
333  for (i = 0; i < ost->enc->channels; i++)
334  *q++ = v;
335  ost->t += ost->tincr;
336  ost->tincr += ost->tincr2;
337  }
338 
339  frame->pts = ost->next_pts;
340  ost->next_pts += frame->nb_samples;
341 
342  return frame;
343 }
344 
345 /*
346  * encode one audio frame and send it to the muxer
347  * return 1 when encoding is finished, 0 otherwise
348  */
350 {
351  AVCodecContext *c;
352  AVFrame *frame;
353  int ret;
354  int dst_nb_samples;
355 
356  c = ost->enc;
357 
359 
360  if (frame) {
361  /* convert samples from native format to destination codec format, using the resampler */
362  /* compute destination number of samples */
363  dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
364  c->sample_rate, c->sample_rate, AV_ROUND_UP);
365  av_assert0(dst_nb_samples == frame->nb_samples);
366 
367  /* when we pass a frame to the encoder, it may keep a reference to it
368  * internally;
369  * make sure we do not overwrite it here
370  */
372  if (ret < 0)
373  exit(1);
374 
375  /* convert to destination format */
377  ost->frame->data, dst_nb_samples,
378  (const uint8_t **)frame->data, frame->nb_samples);
379  if (ret < 0) {
380  fprintf(stderr, "Error while converting\n");
381  exit(1);
382  }
383  frame = ost->frame;
384 
385  frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
386  ost->samples_count += dst_nb_samples;
387  }
388 
389  return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
390 }
391 
392 /**************************************************************/
393 /* video output */
394 
396 {
397  AVFrame *picture;
398  int ret;
399 
400  picture = av_frame_alloc();
401  if (!picture)
402  return NULL;
403 
404  picture->format = pix_fmt;
405  picture->width = width;
406  picture->height = height;
407 
408  /* allocate the buffers for the frame data */
409  ret = av_frame_get_buffer(picture, 0);
410  if (ret < 0) {
411  fprintf(stderr, "Could not allocate frame data.\n");
412  exit(1);
413  }
414 
415  return picture;
416 }
417 
418 static void open_video(AVFormatContext *oc, const AVCodec *codec,
419  OutputStream *ost, AVDictionary *opt_arg)
420 {
421  int ret;
422  AVCodecContext *c = ost->enc;
423  AVDictionary *opt = NULL;
424 
425  av_dict_copy(&opt, opt_arg, 0);
426 
427  /* open the codec */
428  ret = avcodec_open2(c, codec, &opt);
429  av_dict_free(&opt);
430  if (ret < 0) {
431  fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
432  exit(1);
433  }
434 
435  /* allocate and init a re-usable frame */
436  ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
437  if (!ost->frame) {
438  fprintf(stderr, "Could not allocate video frame\n");
439  exit(1);
440  }
441 
442  /* If the output format is not YUV420P, then a temporary YUV420P
443  * picture is needed too. It is then converted to the required
444  * output format. */
445  ost->tmp_frame = NULL;
446  if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
447  ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
448  if (!ost->tmp_frame) {
449  fprintf(stderr, "Could not allocate temporary picture\n");
450  exit(1);
451  }
452  }
453 
454  /* copy the stream parameters to the muxer */
456  if (ret < 0) {
457  fprintf(stderr, "Could not copy the stream parameters\n");
458  exit(1);
459  }
460 }
461 
462 /* Prepare a dummy image. */
463 static void fill_yuv_image(AVFrame *pict, int frame_index,
464  int width, int height)
465 {
466  int x, y, i;
467 
468  i = frame_index;
469 
470  /* Y */
471  for (y = 0; y < height; y++)
472  for (x = 0; x < width; x++)
473  pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
474 
475  /* Cb and Cr */
476  for (y = 0; y < height / 2; y++) {
477  for (x = 0; x < width / 2; x++) {
478  pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
479  pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
480  }
481  }
482 }
483 
485 {
486  AVCodecContext *c = ost->enc;
487 
488  /* check if we want to generate more frames */
489  if (av_compare_ts(ost->next_pts, c->time_base,
490  STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
491  return NULL;
492 
493  /* when we pass a frame to the encoder, it may keep a reference to it
494  * internally; make sure we do not overwrite it here */
495  if (av_frame_make_writable(ost->frame) < 0)
496  exit(1);
497 
498  if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
499  /* as we only generate a YUV420P picture, we must convert it
500  * to the codec pixel format if needed */
501  if (!ost->sws_ctx) {
502  ost->sws_ctx = sws_getContext(c->width, c->height,
504  c->width, c->height,
505  c->pix_fmt,
507  if (!ost->sws_ctx) {
508  fprintf(stderr,
509  "Could not initialize the conversion context\n");
510  exit(1);
511  }
512  }
513  fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
514  sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
515  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
516  ost->frame->linesize);
517  } else {
518  fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
519  }
520 
521  ost->frame->pts = ost->next_pts++;
522 
523  return ost->frame;
524 }
525 
526 /*
527  * encode one video frame and send it to the muxer
528  * return 1 when encoding is finished, 0 otherwise
529  */
531 {
532  return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
533 }
534 
536 {
542  swr_free(&ost->swr_ctx);
543 }
544 
545 /**************************************************************/
546 /* media file output */
547 
548 int main(int argc, char **argv)
549 {
550  OutputStream video_st = { 0 }, audio_st = { 0 };
551  const AVOutputFormat *fmt;
552  const char *filename;
553  AVFormatContext *oc;
554  const AVCodec *audio_codec, *video_codec;
555  int ret;
556  int have_video = 0, have_audio = 0;
557  int encode_video = 0, encode_audio = 0;
558  AVDictionary *opt = NULL;
559  int i;
560 
561  if (argc < 2) {
562  printf("usage: %s output_file\n"
563  "API example program to output a media file with libavformat.\n"
564  "This program generates a synthetic audio and video stream, encodes and\n"
565  "muxes them into a file named output_file.\n"
566  "The output format is automatically guessed according to the file extension.\n"
567  "Raw images can also be output by using '%%d' in the filename.\n"
568  "\n", argv[0]);
569  return 1;
570  }
571 
572  filename = argv[1];
573  for (i = 2; i+1 < argc; i+=2) {
574  if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
575  av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
576  }
577 
578  /* allocate the output media context */
579  avformat_alloc_output_context2(&oc, NULL, NULL, filename);
580  if (!oc) {
581  printf("Could not deduce output format from file extension: using MPEG.\n");
582  avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
583  }
584  if (!oc)
585  return 1;
586 
587  fmt = oc->oformat;
588 
589  /* Add the audio and video streams using the default format codecs
590  * and initialize the codecs. */
591  if (fmt->video_codec != AV_CODEC_ID_NONE) {
592  add_stream(&video_st, oc, &video_codec, fmt->video_codec);
593  have_video = 1;
594  encode_video = 1;
595  }
596  if (fmt->audio_codec != AV_CODEC_ID_NONE) {
597  add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
598  have_audio = 1;
599  encode_audio = 1;
600  }
601 
602  /* Now that all the parameters are set, we can open the audio and
603  * video codecs and allocate the necessary encode buffers. */
604  if (have_video)
605  open_video(oc, video_codec, &video_st, opt);
606 
607  if (have_audio)
608  open_audio(oc, audio_codec, &audio_st, opt);
609 
610  av_dump_format(oc, 0, filename, 1);
611 
612  /* open the output file, if needed */
613  if (!(fmt->flags & AVFMT_NOFILE)) {
614  ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
615  if (ret < 0) {
616  fprintf(stderr, "Could not open '%s': %s\n", filename,
617  av_err2str(ret));
618  return 1;
619  }
620  }
621 
622  /* Write the stream header, if any. */
623  ret = avformat_write_header(oc, &opt);
624  if (ret < 0) {
625  fprintf(stderr, "Error occurred when opening output file: %s\n",
626  av_err2str(ret));
627  return 1;
628  }
629 
630  while (encode_video || encode_audio) {
631  /* select the stream to encode */
632  if (encode_video &&
633  (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
634  audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
635  encode_video = !write_video_frame(oc, &video_st);
636  } else {
637  encode_audio = !write_audio_frame(oc, &audio_st);
638  }
639  }
640 
641  /* Write the trailer, if any. The trailer must be written before you
642  * close the CodecContexts open when you wrote the header; otherwise
643  * av_write_trailer() may try to use memory that was freed on
644  * av_codec_close(). */
645  av_write_trailer(oc);
646 
647  /* Close each codec. */
648  if (have_video)
649  close_stream(oc, &video_st);
650  if (have_audio)
651  close_stream(oc, &audio_st);
652 
653  if (!(fmt->flags & AVFMT_NOFILE))
654  /* Close the output file. */
655  avio_closep(&oc->pb);
656 
657  /* free the stream */
659 
660  return 0;
661 }
AVCodec
AVCodec.
Definition: codec.h:202
AV_SAMPLE_FMT_FLTP
@ AV_SAMPLE_FMT_FLTP
float, planar
Definition: samplefmt.h:69
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:389
OutputStream::tincr
float tincr
Definition: muxing.c:67
OutputStream::samples_count
int samples_count
Definition: muxing.c:60
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
avformat_new_stream
AVStream * avformat_new_stream(AVFormatContext *s, const AVCodec *c)
Add a new stream to a media file.
Definition: utils.c:760
OutputStream::enc
AVCodecContext * enc
Definition: muxing.c:56
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:146
alloc_picture
static AVFrame * alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
Definition: muxing.c:395
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:246
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:90
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:112
avcodec_find_encoder
const AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:915
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:310
av_frame_make_writable
int av_frame_make_writable(AVFrame *frame)
Ensure that the frame data is writable, avoiding data copy if possible.
Definition: frame.c:490
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1261
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:415
AVFrame::width
int width
Definition: frame.h:380
fill_yuv_image
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
Definition: muxing.c:463
open_audio
static void open_audio(AVFormatContext *oc, const AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
Definition: muxing.c:254
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:391
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1204
AVDictionary
Definition: dict.c:30
sample_rate
sample_rate
Definition: ffmpeg_filter.c:156
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:329
AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER
Place global headers in extradata instead of every keyframe.
Definition: avcodec.h:268
get_video_frame
static AVFrame * get_video_frame(OutputStream *ost)
Definition: muxing.c:484
STREAM_PIX_FMT
#define STREAM_PIX_FMT
Definition: muxing.c:49
OutputStream::next_pts
int64_t next_pts
Definition: muxing.c:59
write_audio_frame
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
Definition: muxing.c:349
OutputStream::tmp_frame
AVFrame * tmp_frame
Definition: muxing.c:63
AV_ROUND_UP
@ AV_ROUND_UP
Round toward +infinity.
Definition: mathematics.h:83
AV_CH_LAYOUT_STEREO
#define AV_CH_LAYOUT_STEREO
Definition: channel_layout.h:91
open_video
static void open_video(AVFormatContext *oc, const AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
Definition: muxing.c:418
swr_get_delay
int64_t swr_get_delay(struct SwrContext *s, int64_t base)
Gets the delay the next input sample will experience relative to the next output sample.
Definition: swresample.c:867
main
int main(int argc, char **argv)
Definition: muxing.c:548
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:99
avassert.h
pkt
AVPacket * pkt
Definition: movenc.c:59
OutputStream::swr_ctx
struct SwrContext * swr_ctx
Definition: muxing.c:70
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:621
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
alloc_audio_frame
static AVFrame * alloc_audio_frame(enum AVSampleFormat sample_fmt, uint64_t channel_layout, int sample_rate, int nb_samples)
Definition: muxing.c:226
get_audio_frame
static AVFrame * get_audio_frame(OutputStream *ost)
Definition: muxing.c:320
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:141
width
#define width
OutputStream::frame
AVFrame * frame
Definition: muxing.c:62
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
STREAM_DURATION
#define STREAM_DURATION
Definition: muxing.c:47
swr_alloc
av_cold struct SwrContext * swr_alloc(void)
Allocate SwrContext.
Definition: options.c:150
AVOutputFormat::audio_codec
enum AVCodecID audio_codec
default audio codec
Definition: avformat.h:507
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:622
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:714
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:41
log_packet
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
Definition: muxing.c:73
codec_id
enum AVCodecID codec_id
Definition: vaapi_decode.c:369
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
avformat_write_header
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:472
if
if(ret)
Definition: filter_design.txt:179
AVFormatContext
Format I/O context.
Definition: avformat.h:1193
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1088
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:958
NULL
#define NULL
Definition: coverity.c:32
fmt_ctx
static AVFormatContext * fmt_ctx
Definition: demuxing_decoding.c:38
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:156
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1235
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:134
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:137
OutputStream::sws_ctx
struct SwsContext * sws_ctx
Definition: muxing.c:69
audio_st
AVStream * audio_st
Definition: movenc.c:60
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:226
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:589
AV_CODEC_ID_MPEG1VIDEO
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:51
AVCodecID
AVCodecID
Identify the syntax and semantics of the bitstream.
Definition: codec_id.h:47
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1249
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:516
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:506
av_rescale_rnd
int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
Rescale a 64-bit integer with specified rounding.
Definition: mathematics.c:57
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
SCALE_FLAGS
#define SCALE_FLAGS
Definition: muxing.c:51
sws_getContext
struct SwsContext * sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Allocate and return an SwsContext.
Definition: utils.c:2015
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
video_st
AVStream * video_st
Definition: movenc.c:60
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:464
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:395
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
height
#define height
OutputStream::t
float t
Definition: muxing.c:67
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:521
M_PI
#define M_PI
Definition: mathematics.h:52
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:993
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:454
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1257
av_write_trailer
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1249
AVFMT_GLOBALHEADER
#define AVFMT_GLOBALHEADER
Format wants global header.
Definition: avformat.h:467
AV_CODEC_ID_NONE
@ AV_CODEC_ID_NONE
Definition: codec_id.h:48
AVOutputFormat
Definition: avformat.h:496
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
write_frame
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c, AVStream *st, AVFrame *frame, AVPacket *pkt)
Definition: muxing.c:84
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:358
avcodec.h
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:942
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:928
OutputStream::tincr2
float tincr2
Definition: muxing.c:67
OutputStream::tmp_pkt
AVPacket * tmp_pkt
Definition: muxing.c:65
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
AVFormatContext::oformat
const struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1212
avformat.h
ost
OutputStream * ost
Definition: ffmpeg_filter.c:163
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVFrame::height
int height
Definition: frame.h:380
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:936
channel_layout.h
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2346
avformat_free_context
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:680
AVOutputFormat::video_codec
enum AVCodecID video_codec
default video codec
Definition: avformat.h:508
avio_open
int avio_open(AVIOContext **s, const char *url, int flags)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1198
AVPacket::stream_index
int stream_index
Definition: packet.h:375
close_stream
static void close_stream(AVFormatContext *oc, OutputStream *ost)
Definition: muxing.c:535
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVPacket
This structure stores compressed data.
Definition: packet.h:350
STREAM_FRAME_RATE
#define STREAM_FRAME_RATE
Definition: muxing.c:48
av_interleaved_write_frame
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1234
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
av_dict_copy
int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags)
Copy entries from one AVDictionary struct into another.
Definition: dict.c:217
add_stream
static void add_stream(OutputStream *ost, AVFormatContext *oc, const AVCodec **codec, enum AVCodecID codec_id)
Definition: muxing.c:126
write_video_frame
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
Definition: muxing.c:530
timestamp.h
OutputStream
Definition: muxing.c:54
OutputStream::st
AVStream * st
Definition: muxing.c:55
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:353
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
avformat_alloc_output_context2
int avformat_alloc_output_context2(AVFormatContext **ctx, const AVOutputFormat *oformat, const char *format_name, const char *filename)
Allocate an AVFormatContext for an output format.
Definition: mux.c:136
int
int
Definition: ffmpeg_filter.c:156
SwsContext
Definition: swscale_internal.h:300
av_opt_set_sample_fmt
int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags)
Definition: opt.c:707
AV_CODEC_ID_MPEG2VIDEO
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:52
swscale.h