/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

#define SCALE_FLAGS SWS_BICUBIC

// a wrapper around a single output AVStream
typedef struct OutputStream {
    AVStream *st;
    AVCodecContext *enc;

    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;

    AVFrame *frame;
    AVFrame *tmp_frame;

    AVPacket *tmp_pkt;

    float t, tincr, tincr2;

    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
} OutputStream;

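/* Print a packet's timing fields, expressing the *_time values in the
 * owning stream's time base. */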
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

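/* Generic encode-and-mux helper: send one frame (or NULL to flush) to the
 * encoder, then drain every packet it produces and pass each one to the
 * muxer. Returns 1 once the encoder has been fully drained (EOF), 0 otherwise. */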
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
                       AVStream *st, AVFrame *frame, AVPacket *pkt)
{
    int ret;

    // send the frame to the encoder
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame to the encoder: %s\n",
                av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
            exit(1);
        }

        /* rescale output packet timestamp values from codec to stream timebase */
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        /* Write the compressed frame to the media file. */
        log_packet(fmt_ctx, pkt);
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        /* pkt is now blank (av_interleaved_write_frame() takes ownership of
         * its contents and resets pkt), so that no unreferencing is necessary.
         * This would be different if one used av_write_frame(). */
        if (ret < 0) {
            fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
            exit(1);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}

/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       const AVCodec **codec,
                       enum AVCodecID codec_id)
{
    AVCodecContext *c;
    int i;

    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->tmp_pkt = av_packet_alloc();
    if (!ost->tmp_pkt) {
        fprintf(stderr, "Could not allocate AVPacket\n");
        exit(1);
    }

    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        fprintf(stderr, "Could not alloc an encoding context\n");
        exit(1);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
        c->sample_fmt  = (*codec)->sample_fmts ?
            (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        c->bit_rate    = 64000;
        c->sample_rate = 44100;
        if ((*codec)->supported_samplerates) {
            c->sample_rate = (*codec)->supported_samplerates[0];
            for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                if ((*codec)->supported_samplerates[i] == 44100)
                    c->sample_rate = 44100;
            }
        }
        av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
        ost->st->time_base = (AVRational){ 1, c->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;

        c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        c->width    = 352;
        c->height   = 288;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
        c->time_base       = ost->st->time_base;

        c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
        c->pix_fmt       = STREAM_PIX_FMT;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

/**************************************************************/
/* audio output */

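/* Allocate an audio frame with the given sample format, channel layout and
 * rate; when nb_samples is non-zero, the matching data buffers are allocated
 * as well. */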
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  const AVChannelLayout *channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Error allocating an audio frame\n");
        exit(1);
    }

    frame->format = sample_fmt;
    av_channel_layout_copy(&frame->ch_layout, channel_layout);
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;

    if (nb_samples) {
        if (av_frame_get_buffer(frame, 0) < 0) {
            fprintf(stderr, "Error allocating an audio buffer\n");
            exit(1);
        }
    }

    return frame;
}

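/* Open the audio encoder, initialize the synthetic sine-wave generator and
 * create the swresample context used to convert the generated S16 samples to
 * the encoder's native sample format. */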
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_chlayout  (ost->swr_ctx, "in_chlayout",     &c->ch_layout,      0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",   c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",    AV_SAMPLE_FMT_S16, 0);
    av_opt_set_chlayout  (ost->swr_ctx, "out_chlayout",    &c->ch_layout,      0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",  c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",   c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}

/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
 * 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
    AVFrame *frame = ost->tmp_frame;
    int j, i, v;
    int16_t *q = (int16_t*)frame->data[0];

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, ost->enc->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    for (j = 0; j < frame->nb_samples; j++) {
        v = (int)(sin(ost->t) * 10000);
        for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
            *q++ = v;
        ost->t     += ost->tincr;
        ost->tincr += ost->tincr2;
    }

    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;

    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVFrame *frame;
    int ret;
    int dst_nb_samples;

    c = ost->enc;

    frame = get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
}

/**************************************************************/
/* video output */

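/* Allocate a video frame of the given pixel format and size, including its
 * data buffers. */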
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}

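/* Open the video encoder, allocate the reusable output frame and, when the
 * encoder pixel format is not YUV420P, an intermediate YUV420P frame for the
 * generated image. */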
static void open_video(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
                           int width, int height)
{
    int x, y, i;

    i = frame_index;

    /* Y */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

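/* Produce the next synthetic video frame, converting it with libswscale when
 * the encoder pixel format differs from YUV420P; returns NULL once
 * STREAM_DURATION has been reached. */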
static AVFrame *get_video_frame(OutputStream *ost)
{
    AVCodecContext *c = ost->enc;

    /* check if we want to generate more frames */
    if (av_compare_ts(ost->next_pts, c->time_base,
                      STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
        return NULL;

    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(ost->frame) < 0)
        exit(1);

    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* as we only generate a YUV420P picture, we must convert it
         * to the codec pixel format if needed */
        if (!ost->sws_ctx) {
            ost->sws_ctx = sws_getContext(c->width, c->height,
                                          AV_PIX_FMT_YUV420P,
                                          c->width, c->height,
                                          c->pix_fmt,
                                          SCALE_FLAGS, NULL, NULL, NULL);
            if (!ost->sws_ctx) {
                fprintf(stderr,
                        "Could not initialize the conversion context\n");
                exit(1);
            }
        }
        fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
        sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
                  ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
    }

    ost->frame->pts = ost->next_pts++;

    return ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    av_packet_free(&ost->tmp_pkt);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */

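/* Overall flow: build the output context from the file name, add one video
 * and one audio stream with the container's default codecs, open the encoders,
 * write the header, interleave encoded packets until both streams reach
 * STREAM_DURATION, then write the trailer and free everything. */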
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const AVOutputFormat *fmt;
    const char *filename;
    AVFormatContext *oc;
    const AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;
    int i;

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    for (i = 2; i+1 < argc; i+=2) {
        if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
            av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_closep(&oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
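
/*
 * A minimal build sketch (module names are the usual FFmpeg pkg-config ones;
 * adjust paths and flags for your installation):
 *
 *   gcc -o muxing muxing.c \
 *       $(pkg-config --cflags --libs libavformat libavcodec libswscale libswresample libavutil) -lm
 *
 * Example run, letting the muxer pick the container from the file extension:
 *
 *   ./muxing out.mkv
 */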