ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_ISATTY
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 #endif
43 
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
47 #include "libavutil/opt.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/mathematics.h"
56 #include "libavutil/pixdesc.h"
57 #include "libavutil/avstring.h"
58 #include "libavutil/libm.h"
59 #include "libavutil/imgutils.h"
60 #include "libavutil/timestamp.h"
61 #include "libavutil/bprint.h"
62 #include "libavutil/time.h"
64 #include "libavcodec/mathops.h"
65 #include "libavformat/os_support.h"
66 
67 # include "libavfilter/avcodec.h"
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
112 const char program_name[] = "ffmpeg";
113 const int program_birth_year = 2000;
114 
115 static FILE *vstats_file;
116 
117 const char *const forced_keyframes_const_names[] = {
118  "n",
119  "n_forced",
120  "prev_forced_n",
121  "prev_forced_t",
122  "t",
123  NULL
124 };
125 
126 static void do_video_stats(OutputStream *ost, int frame_size);
127 static int64_t getutime(void);
128 static int64_t getmaxrss(void);
129 
130 static int run_as_daemon = 0;
131 static int nb_frames_dup = 0;
132 static int nb_frames_drop = 0;
133 static int64_t decode_error_stat[2];
134 
135 static int current_time;
136 AVIOContext *progress_avio = NULL;
137 
138 static uint8_t *subtitle_out;
139 
140 InputStream **input_streams = NULL;
141 int        nb_input_streams = 0;
142 InputFile   **input_files   = NULL;
143 int        nb_input_files   = 0;
144 
145 OutputStream **output_streams = NULL;
146 int         nb_output_streams = 0;
147 OutputFile   **output_files   = NULL;
148 int         nb_output_files   = 0;
149 
150 FilterGraph **filtergraphs;
151 int        nb_filtergraphs;
152 
153 #if HAVE_TERMIOS_H
154 
155 /* init terminal so that we can grab keys */
156 static struct termios oldtty;
157 static int restore_tty;
158 #endif
159 
160 #if HAVE_PTHREADS
161 static void free_input_threads(void);
162 #endif
163 
164 /* sub2video hack:
165  Convert subtitles to video with alpha to insert them in filter graphs.
166  This is a temporary solution until libavfilter gets real subtitles support.
167  */
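/* How it works, in brief: each subtitle input keeps a blank 32-bit RGB canvas
   sized like the main picture; decoded bitmap rectangles are painted onto that
   canvas, and the canvas is pushed into the stream's buffer sources as an
   ordinary video frame so a filter such as overlay can composite it. */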
168 
169 static int sub2video_get_blank_frame(InputStream *ist)
170 {
171  int ret;
172  AVFrame *frame = ist->sub2video.frame;
173 
174  av_frame_unref(frame);
175  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
176  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
178  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
179  return ret;
180  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
181  return 0;
182 }
183 
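/* Expand one palettized subtitle rectangle into the 32-bit canvas:
   r->pict.data[0] holds per-pixel palette indices, r->pict.data[1] the
   32-bit palette entries (including alpha). */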
184 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
185  AVSubtitleRect *r)
186 {
187  uint32_t *pal, *dst2;
188  uint8_t *src, *src2;
189  int x, y;
190 
191  if (r->type != SUBTITLE_BITMAP) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
193  return;
194  }
195  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
197  r->x, r->y, r->w, r->h, w, h
198  );
199  return;
200  }
201 
202  dst += r->y * dst_linesize + r->x * 4;
203  src = r->pict.data[0];
204  pal = (uint32_t *)r->pict.data[1];
205  for (y = 0; y < r->h; y++) {
206  dst2 = (uint32_t *)dst;
207  src2 = src;
208  for (x = 0; x < r->w; x++)
209  *(dst2++) = pal[*(src2++)];
210  dst += dst_linesize;
211  src += r->pict.linesize[0];
212  }
213 }
214 
215 static void sub2video_push_ref(InputStream *ist, int64_t pts)
216 {
217  AVFrame *frame = ist->sub2video.frame;
218  int i;
219 
220  av_assert1(frame->data[0]);
221  ist->sub2video.last_pts = frame->pts = pts;
222  for (i = 0; i < ist->nb_filters; i++)
223  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
224  AV_BUFFERSRC_FLAG_KEEP_REF |
225  AV_BUFFERSRC_FLAG_PUSH);
226 }
227 
228 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
229 {
230  AVFrame *frame = ist->sub2video.frame;
231  int8_t *dst;
232  int dst_linesize;
233  int num_rects, i;
234  int64_t pts, end_pts;
235 
236  if (!frame)
237  return;
238  if (sub) {
239  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
240  AV_TIME_BASE_Q, ist->st->time_base);
241  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
242  AV_TIME_BASE_Q, ist->st->time_base);
243  num_rects = sub->num_rects;
244  } else {
245  pts = ist->sub2video.end_pts;
246  end_pts = INT64_MAX;
247  num_rects = 0;
248  }
249  if (sub2video_get_blank_frame(ist) < 0) {
250  av_log(ist->dec_ctx, AV_LOG_ERROR,
251  "Impossible to get a blank canvas.\n");
252  return;
253  }
254  dst = frame->data [0];
255  dst_linesize = frame->linesize[0];
256  for (i = 0; i < num_rects; i++)
257  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
258  sub2video_push_ref(ist, pts);
259  ist->sub2video.end_pts = end_pts;
260 }
261 
262 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
263 {
264  InputFile *infile = input_files[ist->file_index];
265  int i, j, nb_reqs;
266  int64_t pts2;
267 
268  /* When a frame is read from a file, examine all sub2video streams in
269  the same file and send the sub2video frame again. Otherwise, decoded
270  video frames could be accumulating in the filter graph while a filter
271  (possibly overlay) is desperately waiting for a subtitle frame. */
272  for (i = 0; i < infile->nb_streams; i++) {
273  InputStream *ist2 = input_streams[infile->ist_index + i];
274  if (!ist2->sub2video.frame)
275  continue;
276  /* subtitles seem to be usually muxed ahead of other streams;
277  if not, subtracting a larger time here is necessary */
278  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
279  /* do not send the heartbeat frame if the subtitle is already ahead */
280  if (pts2 <= ist2->sub2video.last_pts)
281  continue;
282  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
283  sub2video_update(ist2, NULL);
284  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
285  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
286  if (nb_reqs)
287  sub2video_push_ref(ist2, pts2);
288  }
289 }
290 
291 static void sub2video_flush(InputStream *ist)
292 {
293  int i;
294 
295  if (ist->sub2video.end_pts < INT64_MAX)
296  sub2video_update(ist, NULL);
297  for (i = 0; i < ist->nb_filters; i++)
298  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
299 }
300 
301 /* end of sub2video hack */
302 
303 static void term_exit_sigsafe(void)
304 {
305 #if HAVE_TERMIOS_H
306  if(restore_tty)
307  tcsetattr (0, TCSANOW, &oldtty);
308 #endif
309 }
310 
311 void term_exit(void)
312 {
313  av_log(NULL, AV_LOG_QUIET, "%s", "");
314  term_exit_sigsafe();
315 }
316 
317 static volatile int received_sigterm = 0;
318 static volatile int received_nb_signals = 0;
319 static volatile int transcode_init_done = 0;
320 static volatile int ffmpeg_exited = 0;
321 static int main_return_code = 0;
322 
323 static void
324 sigterm_handler(int sig)
325 {
326  received_sigterm = sig;
327  received_nb_signals++;
328  term_exit_sigsafe();
329  if(received_nb_signals > 3) {
330  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
331  strlen("Received > 3 system signals, hard exiting\n"));
332 
333  exit(123);
334  }
335 }
336 
337 #if HAVE_SETCONSOLECTRLHANDLER
338 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
339 {
340  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
341 
342  switch (fdwCtrlType)
343  {
344  case CTRL_C_EVENT:
345  case CTRL_BREAK_EVENT:
346  sigterm_handler(SIGINT);
347  return TRUE;
348 
349  case CTRL_CLOSE_EVENT:
350  case CTRL_LOGOFF_EVENT:
351  case CTRL_SHUTDOWN_EVENT:
352  sigterm_handler(SIGTERM);
353  /* Basically, with these 3 events, when we return from this method the
354  process is hard terminated, so stall as long as we need to
355  to try and let the main thread(s) clean up and gracefully terminate
356  (we have at most 5 seconds, but should be done far before that). */
357  while (!ffmpeg_exited) {
358  Sleep(0);
359  }
360  return TRUE;
361 
362  default:
363  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
364  return FALSE;
365  }
366 }
367 #endif
368 
369 void term_init(void)
370 {
371 #if HAVE_TERMIOS_H
372  if(!run_as_daemon){
373  struct termios tty;
374  int istty = 1;
375 #if HAVE_ISATTY
376  istty = isatty(0) && isatty(2);
377 #endif
378  if (istty && tcgetattr (0, &tty) == 0) {
379  oldtty = tty;
380  restore_tty = 1;
381 
382  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
383  |INLCR|IGNCR|ICRNL|IXON);
384  tty.c_oflag |= OPOST;
385  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
386  tty.c_cflag &= ~(CSIZE|PARENB);
387  tty.c_cflag |= CS8;
388  tty.c_cc[VMIN] = 1;
389  tty.c_cc[VTIME] = 0;
390 
391  tcsetattr (0, TCSANOW, &tty);
392  }
393  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
394  }
395 #endif
396 
397  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
398  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
399 #ifdef SIGXCPU
400  signal(SIGXCPU, sigterm_handler);
401 #endif
402 #if HAVE_SETCONSOLECTRLHANDLER
403  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
404 #endif
405 }
406 
407 /* read a key without blocking */
408 static int read_key(void)
409 {
410  unsigned char ch;
411 #if HAVE_TERMIOS_H
412  int n = 1;
413  struct timeval tv;
414  fd_set rfds;
415 
416  FD_ZERO(&rfds);
417  FD_SET(0, &rfds);
418  tv.tv_sec = 0;
419  tv.tv_usec = 0;
420  n = select(1, &rfds, NULL, NULL, &tv);
421  if (n > 0) {
422  n = read(0, &ch, 1);
423  if (n == 1)
424  return ch;
425 
426  return n;
427  }
428 #elif HAVE_KBHIT
429 # if HAVE_PEEKNAMEDPIPE
430  static int is_pipe;
431  static HANDLE input_handle;
432  DWORD dw, nchars;
433  if(!input_handle){
434  input_handle = GetStdHandle(STD_INPUT_HANDLE);
435  is_pipe = !GetConsoleMode(input_handle, &dw);
436  }
437 
438  if (is_pipe) {
439  /* When running under a GUI, you will end here. */
440  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
441  // input pipe may have been closed by the program that ran ffmpeg
442  return -1;
443  }
444  //Read it
445  if(nchars != 0) {
446  read(0, &ch, 1);
447  return ch;
448  }else{
449  return -1;
450  }
451  }
452 # endif
453  if(kbhit())
454  return(getch());
455 #endif
456  return -1;
457 }
458 
459 static int decode_interrupt_cb(void *ctx)
460 {
461  return received_nb_signals > transcode_init_done;
462 }
463 
464 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
465 
466 static void ffmpeg_cleanup(int ret)
467 {
468  int i, j;
469 
470  if (do_benchmark) {
471  int maxrss = getmaxrss() / 1024;
472  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
473  }
474 
475  for (i = 0; i < nb_filtergraphs; i++) {
476  FilterGraph *fg = filtergraphs[i];
477  avfilter_graph_free(&fg->graph);
478  for (j = 0; j < fg->nb_inputs; j++) {
479  av_freep(&fg->inputs[j]->name);
480  av_freep(&fg->inputs[j]);
481  }
482  av_freep(&fg->inputs);
483  for (j = 0; j < fg->nb_outputs; j++) {
484  av_freep(&fg->outputs[j]->name);
485  av_freep(&fg->outputs[j]);
486  }
487  av_freep(&fg->outputs);
488  av_freep(&fg->graph_desc);
489 
490  av_freep(&filtergraphs[i]);
491  }
492  av_freep(&filtergraphs);
493 
494  av_freep(&subtitle_out);
495 
496  /* close files */
497  for (i = 0; i < nb_output_files; i++) {
498  OutputFile *of = output_files[i];
499  AVFormatContext *s;
500  if (!of)
501  continue;
502  s = of->ctx;
503  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
504  avio_closep(&s->pb);
505  avformat_free_context(s);
506  av_dict_free(&of->opts);
507 
508  av_freep(&output_files[i]);
509  }
510  for (i = 0; i < nb_output_streams; i++) {
511  OutputStream *ost = output_streams[i];
512  AVBitStreamFilterContext *bsfc;
513 
514  if (!ost)
515  continue;
516 
517  bsfc = ost->bitstream_filters;
518  while (bsfc) {
519  AVBitStreamFilterContext *next = bsfc->next;
520  av_bitstream_filter_close(bsfc);
521  bsfc = next;
522  }
523  ost->bitstream_filters = NULL;
524  av_frame_free(&ost->filtered_frame);
525  av_frame_free(&ost->last_frame);
526 
527  av_parser_close(ost->parser);
528 
529  av_freep(&ost->forced_keyframes);
530  av_expr_free(ost->forced_keyframes_pexpr);
531  av_freep(&ost->avfilter);
532  av_freep(&ost->logfile_prefix);
533 
534  av_freep(&ost->audio_channels_map);
535  ost->audio_channels_mapped = 0;
536 
537  avcodec_free_context(&ost->enc_ctx);
538 
539  av_freep(&output_streams[i]);
540  }
541 #if HAVE_PTHREADS
542  free_input_threads();
543 #endif
544  for (i = 0; i < nb_input_files; i++) {
545  avformat_close_input(&input_files[i]->ctx);
546  av_freep(&input_files[i]);
547  }
548  for (i = 0; i < nb_input_streams; i++) {
549  InputStream *ist = input_streams[i];
550 
551  av_frame_free(&ist->decoded_frame);
552  av_frame_free(&ist->filter_frame);
553  av_dict_free(&ist->decoder_opts);
554  avsubtitle_free(&ist->prev_sub.subtitle);
555  av_frame_free(&ist->sub2video.frame);
556  av_freep(&ist->filters);
557  av_freep(&ist->hwaccel_device);
558 
559  avcodec_free_context(&ist->dec_ctx);
560 
561  av_freep(&input_streams[i]);
562  }
563 
564  if (vstats_file)
565  fclose(vstats_file);
566  av_freep(&vstats_filename);
567 
568  av_freep(&input_streams);
569  av_freep(&input_files);
570  av_freep(&output_streams);
571  av_freep(&output_files);
572 
573  uninit_opts();
574 
574 
575  avformat_network_deinit();
576 
577  if (received_sigterm) {
578  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
579  (int) received_sigterm);
580  } else if (ret && transcode_init_done) {
581  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
582  }
583  term_exit();
584  ffmpeg_exited = 1;
585 }
586 
587 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
588 {
589  AVDictionaryEntry *t = NULL;
590 
591  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
592  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
593  }
594 }
595 
596 static void assert_avoptions(AVDictionary *m)
597 {
598  AVDictionaryEntry *t;
599  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
600  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
601  exit_program(1);
602  }
603 }
604 
605 static void abort_codec_experimental(AVCodec *c, int encoder)
606 {
607  exit_program(1);
608 }
609 
610 static void update_benchmark(const char *fmt, ...)
611 {
612  if (do_benchmark_all) {
613  int64_t t = getutime();
614  va_list va;
615  char buf[1024];
616 
617  if (fmt) {
618  va_start(va, fmt);
619  vsnprintf(buf, sizeof(buf), fmt, va);
620  va_end(va);
621  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
622  }
623  current_time = t;
624  }
625 }
626 
627 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
628 {
629  int i;
630  for (i = 0; i < nb_output_streams; i++) {
631  OutputStream *ost2 = output_streams[i];
632  ost2->finished |= ost == ost2 ? this_stream : others;
633  }
634 }
635 
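/* Push one encoded (or copied) packet to the muxer: run the stream's
   bitstream filters, sanitize DTS/PTS so the muxer sees monotonically
   increasing timestamps, then hand the packet to av_interleaved_write_frame(). */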
636 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
637 {
638  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
639  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
640  int ret;
641 
642  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
643  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
644  if (ost->st->codec->extradata) {
645  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
646  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
647  }
648  }
649 
650  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
651  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
652  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
653 
654  /*
655  * Audio encoders may split the packets -- #frames in != #packets out.
656  * But there is no reordering, so we can limit the number of output packets
657  * by simply dropping them here.
658  * Counting encoded video frames needs to be done separately because of
659  * reordering, see do_video_out()
660  */
661  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
662  if (ost->frame_number >= ost->max_frames) {
663  av_free_packet(pkt);
664  return;
665  }
666  ost->frame_number++;
667  }
668  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
669  int i;
670  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
671  NULL);
672  ost->quality = sd ? AV_RL32(sd) : -1;
673  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
674 
675  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
676  if (sd && i < sd[5])
677  ost->error[i] = AV_RL64(sd + 8 + 8*i);
678  else
679  ost->error[i] = -1;
680  }
681  }
682 
683  if (bsfc)
685 
686  while (bsfc) {
687  AVPacket new_pkt = *pkt;
688  AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
689  bsfc->filter->name,
690  NULL, 0);
691  int a = av_bitstream_filter_filter(bsfc, avctx,
692  bsf_arg ? bsf_arg->value : NULL,
693  &new_pkt.data, &new_pkt.size,
694  pkt->data, pkt->size,
695  pkt->flags & AV_PKT_FLAG_KEY);
697  if(a == 0 && new_pkt.data != pkt->data
698 #if FF_API_DESTRUCT_PACKET
699  && new_pkt.destruct
700 #endif
701  ) {
703  uint8_t *t = av_malloc(new_pkt.size + AV_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
704  if(t) {
705  memcpy(t, new_pkt.data, new_pkt.size);
706  memset(t + new_pkt.size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
707  new_pkt.data = t;
708  new_pkt.buf = NULL;
709  a = 1;
710  } else
711  a = AVERROR(ENOMEM);
712  }
713  if (a > 0) {
714  pkt->side_data = NULL;
715  pkt->side_data_elems = 0;
716  av_free_packet(pkt);
717  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
718  av_buffer_default_free, NULL, 0);
719  if (!new_pkt.buf)
720  exit_program(1);
721  } else if (a < 0) {
722  new_pkt = *pkt;
723  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
724  bsfc->filter->name, pkt->stream_index,
725  avctx->codec ? avctx->codec->name : "copy");
726  print_error("", a);
727  if (exit_on_error)
728  exit_program(1);
729  }
730  *pkt = new_pkt;
731 
732  bsfc = bsfc->next;
733  }
734 
735  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
736  if (pkt->dts != AV_NOPTS_VALUE &&
737  pkt->pts != AV_NOPTS_VALUE &&
738  pkt->dts > pkt->pts) {
739  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
740  pkt->dts, pkt->pts,
741  ost->file_index, ost->st->index);
742  pkt->pts =
743  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
744  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
745  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
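 /* the sum of the three candidates minus their minimum and maximum
    leaves their median, which is then used for both pts and dts */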
746  }
747  if(
748  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
749  pkt->dts != AV_NOPTS_VALUE &&
750  ost->last_mux_dts != AV_NOPTS_VALUE) {
751  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
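 /* strict muxers require strictly increasing DTS (hence the +1);
    AVFMT_TS_NONSTRICT formats only need non-decreasing DTS */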
752  if (pkt->dts < max) {
753  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
754  av_log(s, loglevel, "Non-monotonous DTS in output stream "
755  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
756  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
757  if (exit_on_error) {
758  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
759  exit_program(1);
760  }
761  av_log(s, loglevel, "changing to %"PRId64". This may result "
762  "in incorrect timestamps in the output file.\n",
763  max);
764  if(pkt->pts >= pkt->dts)
765  pkt->pts = FFMAX(pkt->pts, max);
766  pkt->dts = max;
767  }
768  }
769  }
770  ost->last_mux_dts = pkt->dts;
771 
772  ost->data_size += pkt->size;
773  ost->packets_written++;
774 
775  pkt->stream_index = ost->index;
776 
777  if (debug_ts) {
778  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
779  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
780  av_get_media_type_string(ost->enc_ctx->codec_type),
781  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
782  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
783  pkt->size
784  );
785  }
786 
787  ret = av_interleaved_write_frame(s, pkt);
788  if (ret < 0) {
789  print_error("av_interleaved_write_frame()", ret);
790  main_return_code = 1;
791  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
792  }
793  av_free_packet(pkt);
794 }
795 
796 static void close_output_stream(OutputStream *ost)
797 {
798  OutputFile *of = output_files[ost->file_index];
799 
800  ost->finished |= ENCODER_FINISHED;
801  if (of->shortest) {
802  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
803  of->recording_time = FFMIN(of->recording_time, end);
804  }
805 }
806 
807 static int check_recording_time(OutputStream *ost)
808 {
809  OutputFile *of = output_files[ost->file_index];
810 
811  if (of->recording_time != INT64_MAX &&
812  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
813  AV_TIME_BASE_Q) >= 0) {
814  close_output_stream(ost);
815  return 0;
816  }
817  return 1;
818 }
819 
820 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
821  AVFrame *frame)
822 {
823  AVCodecContext *enc = ost->enc_ctx;
824  AVPacket pkt;
825  int got_packet = 0;
826 
827  av_init_packet(&pkt);
828  pkt.data = NULL;
829  pkt.size = 0;
830 
831  if (!check_recording_time(ost))
832  return;
833 
834  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
835  frame->pts = ost->sync_opts;
836  ost->sync_opts = frame->pts + frame->nb_samples;
837  ost->samples_encoded += frame->nb_samples;
838  ost->frames_encoded++;
839 
840  av_assert0(pkt.size || !pkt.data);
842  if (debug_ts) {
843  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
844  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
845  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
846  enc->time_base.num, enc->time_base.den);
847  }
848 
849  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
850  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
851  exit_program(1);
852  }
853  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
854 
855  if (got_packet) {
856  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
857 
858  if (debug_ts) {
859  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
860  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
861  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
862  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
863  }
864 
865  write_frame(s, &pkt, ost);
866  }
867 }
868 
869 static void do_subtitle_out(AVFormatContext *s,
870  OutputStream *ost,
871  InputStream *ist,
872  AVSubtitle *sub)
873 {
874  int subtitle_out_max_size = 1024 * 1024;
875  int subtitle_out_size, nb, i;
876  AVCodecContext *enc;
877  AVPacket pkt;
878  int64_t pts;
879 
880  if (sub->pts == AV_NOPTS_VALUE) {
881  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
882  if (exit_on_error)
883  exit_program(1);
884  return;
885  }
886 
887  enc = ost->enc_ctx;
888 
889  if (!subtitle_out) {
890  subtitle_out = av_malloc(subtitle_out_max_size);
891  if (!subtitle_out) {
892  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
893  exit_program(1);
894  }
895  }
896 
897  /* Note: DVB subtitles need one packet to draw them and another
898  packet to clear them */
899  /* XXX: signal it in the codec context ? */
900  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
901  nb = 2;
902  else
903  nb = 1;
904 
905  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
906  pts = sub->pts;
907  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
908  pts -= output_files[ost->file_index]->start_time;
909  for (i = 0; i < nb; i++) {
910  unsigned save_num_rects = sub->num_rects;
911 
912  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
913  if (!check_recording_time(ost))
914  return;
915 
916  sub->pts = pts;
917  // start_display_time is required to be 0
918  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
919  sub->end_display_time -= sub->start_display_time;
920  sub->start_display_time = 0;
921  if (i == 1)
922  sub->num_rects = 0;
923 
924  ost->frames_encoded++;
925 
926  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
927  subtitle_out_max_size, sub);
928  if (i == 1)
929  sub->num_rects = save_num_rects;
930  if (subtitle_out_size < 0) {
931  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
932  exit_program(1);
933  }
934 
935  av_init_packet(&pkt);
936  pkt.data = subtitle_out;
937  pkt.size = subtitle_out_size;
938  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
939  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
940  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
941  /* XXX: the pts correction is handled here. Maybe handling
942  it in the codec would be better */
943  if (i == 0)
944  pkt.pts += 90 * sub->start_display_time;
945  else
946  pkt.pts += 90 * sub->end_display_time;
947  }
948  pkt.dts = pkt.pts;
949  write_frame(s, &pkt, ost);
950  }
951 }
952 
953 static void do_video_out(AVFormatContext *s,
954  OutputStream *ost,
955  AVFrame *next_picture,
956  double sync_ipts)
957 {
958  int ret, format_video_sync;
959  AVPacket pkt;
960  AVCodecContext *enc = ost->enc_ctx;
961  AVCodecContext *mux_enc = ost->st->codec;
962  int nb_frames, nb0_frames, i;
963  double delta, delta0;
964  double duration = 0;
965  int frame_size = 0;
966  InputStream *ist = NULL;
967  AVFilterContext *filter = ost->filter->filter;
968 
969  if (ost->source_index >= 0)
970  ist = input_streams[ost->source_index];
971 
972  if (filter->inputs[0]->frame_rate.num > 0 &&
973  filter->inputs[0]->frame_rate.den > 0)
974  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
975 
976  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
977  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
978 
979  if (!ost->filters_script &&
980  !ost->filters &&
981  next_picture &&
982  ist &&
983  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
984  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
985  }
986 
987  if (!next_picture) {
988  //end, flushing
989  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
990  ost->last_nb0_frames[1],
991  ost->last_nb0_frames[2]);
992  } else {
993  delta0 = sync_ipts - ost->sync_opts;
994  delta = delta0 + duration;
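 /* delta0 is how far the incoming frame's pts is ahead of the encoder
    clock (ost->sync_opts); delta is the same distance measured at the
    end of the frame, i.e. including its duration */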
995 
996  /* by default, we output a single frame */
997  nb0_frames = 0;
998  nb_frames = 1;
999 
1000  format_video_sync = video_sync_method;
1001  if (format_video_sync == VSYNC_AUTO) {
1002  if(!strcmp(s->oformat->name, "avi")) {
1003  format_video_sync = VSYNC_VFR;
1004  } else
1005  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1006  if ( ist
1007  && format_video_sync == VSYNC_CFR
1008  && input_files[ist->file_index]->ctx->nb_streams == 1
1009  && input_files[ist->file_index]->input_ts_offset == 0) {
1010  format_video_sync = VSYNC_VSCFR;
1011  }
1012  if (format_video_sync == VSYNC_CFR && copy_ts) {
1013  format_video_sync = VSYNC_VSCFR;
1014  }
1015  }
1016 
1017  if (delta0 < 0 &&
1018  delta > 0 &&
1019  format_video_sync != VSYNC_PASSTHROUGH &&
1020  format_video_sync != VSYNC_DROP) {
1021  double cor = FFMIN(-delta0, duration);
1022  if (delta0 < -0.6) {
1023  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1024  } else
1025  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1026  sync_ipts += cor;
1027  duration -= cor;
1028  delta0 += cor;
1029  }
1030 
1031  switch (format_video_sync) {
1032  case VSYNC_VSCFR:
1033  if (ost->frame_number == 0 && delta - duration >= 0.5) {
1034  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
1035  delta = duration;
1036  delta0 = 0;
1037  ost->sync_opts = lrint(sync_ipts);
1038  }
1039  case VSYNC_CFR:
1040  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1041  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1042  nb_frames = 0;
1043  } else if (delta < -1.1)
1044  nb_frames = 0;
1045  else if (delta > 1.1) {
1046  nb_frames = lrintf(delta);
1047  if (delta0 > 1.1)
1048  nb0_frames = lrintf(delta0 - 0.6);
1049  }
1050  break;
1051  case VSYNC_VFR:
1052  if (delta <= -0.6)
1053  nb_frames = 0;
1054  else if (delta > 0.6)
1055  ost->sync_opts = lrint(sync_ipts);
1056  break;
1057  case VSYNC_DROP:
1058  case VSYNC_PASSTHROUGH:
1059  ost->sync_opts = lrint(sync_ipts);
1060  break;
1061  default:
1062  av_assert0(0);
1063  }
1064  }
1065 
1066  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1067  nb0_frames = FFMIN(nb0_frames, nb_frames);
1068 
1069  memmove(ost->last_nb0_frames + 1,
1070  ost->last_nb0_frames,
1071  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1072  ost->last_nb0_frames[0] = nb0_frames;
1073 
1074  if (nb0_frames == 0 && ost->last_droped) {
1075  nb_frames_drop++;
1076  av_log(NULL, AV_LOG_VERBOSE,
1077  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1078  ost->frame_number, ost->st->index, ost->last_frame->pts);
1079  }
1080  if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
1081  if (nb_frames > dts_error_threshold * 30) {
1082  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1083  nb_frames_drop++;
1084  return;
1085  }
1086  nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1087  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1088  }
1089  ost->last_droped = nb_frames == nb0_frames && next_picture;
1090 
1091  /* duplicates frame if needed */
1092  for (i = 0; i < nb_frames; i++) {
1093  AVFrame *in_picture;
1094  av_init_packet(&pkt);
1095  pkt.data = NULL;
1096  pkt.size = 0;
1097 
1098  if (i < nb0_frames && ost->last_frame) {
1099  in_picture = ost->last_frame;
1100  } else
1101  in_picture = next_picture;
1102 
1103  if (!in_picture)
1104  return;
1105 
1106  in_picture->pts = ost->sync_opts;
1107 
1108 #if 1
1109  if (!check_recording_time(ost))
1110 #else
1111  if (ost->frame_number >= ost->max_frames)
1112 #endif
1113  return;
1114 
1115  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1116  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1117  /* raw pictures are written as an AVPicture structure to
1118  avoid any copies; we temporarily support this older
1119  method. */
1120  if (in_picture->interlaced_frame)
1121  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1122  else
1123  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1124  pkt.data = (uint8_t *)in_picture;
1125  pkt.size = sizeof(AVPicture);
1126  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1127  pkt.flags |= AV_PKT_FLAG_KEY;
1128 
1129  write_frame(s, &pkt, ost);
1130  } else {
1131  int got_packet, forced_keyframe = 0;
1132  double pts_time;
1133 
1134  if (enc->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
1135  ost->top_field_first >= 0)
1136  in_picture->top_field_first = !!ost->top_field_first;
1137 
1138  if (in_picture->interlaced_frame) {
1139  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1140  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1141  else
1142  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1143  } else
1144  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1145 
1146  in_picture->quality = enc->global_quality;
1147  in_picture->pict_type = 0;
1148 
1149  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1150  in_picture->pts * av_q2d(enc->time_base) : NAN;
1151  if (ost->forced_kf_index < ost->forced_kf_count &&
1152  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1153  ost->forced_kf_index++;
1154  forced_keyframe = 1;
1155  } else if (ost->forced_keyframes_pexpr) {
1156  double res;
1157  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1158  res = av_expr_eval(ost->forced_keyframes_pexpr,
1159  ost->forced_keyframes_expr_const_values, NULL);
1160  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1161  ost->forced_keyframes_expr_const_values[FKF_N],
1162  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1163  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1164  ost->forced_keyframes_expr_const_values[FKF_T],
1165  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1166  res);
1167  if (res) {
1168  forced_keyframe = 1;
1169  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1170  ost->forced_keyframes_expr_const_values[FKF_N];
1171  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1172  ost->forced_keyframes_expr_const_values[FKF_T];
1173  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1174  }
1175 
1176  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1177  } else if ( ost->forced_keyframes
1178  && !strncmp(ost->forced_keyframes, "source", 6)
1179  && in_picture->key_frame==1) {
1180  forced_keyframe = 1;
1181  }
1182 
1183  if (forced_keyframe) {
1184  in_picture->pict_type = AV_PICTURE_TYPE_I;
1185  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1186  }
1187 
1189  if (debug_ts) {
1190  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1191  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1192  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1193  enc->time_base.num, enc->time_base.den);
1194  }
1195 
1196  ost->frames_encoded++;
1197 
1198  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1199  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1200  if (ret < 0) {
1201  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1202  exit_program(1);
1203  }
1204 
1205  if (got_packet) {
1206  if (debug_ts) {
1207  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1208  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1209  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1210  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1211  }
1212 
1213  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1214  pkt.pts = ost->sync_opts;
1215 
1216  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1217 
1218  if (debug_ts) {
1219  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1220  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1221  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1222  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1223  }
1224 
1225  frame_size = pkt.size;
1226  write_frame(s, &pkt, ost);
1227 
1228  /* if two pass, output log */
1229  if (ost->logfile && enc->stats_out) {
1230  fprintf(ost->logfile, "%s", enc->stats_out);
1231  }
1232  }
1233  }
1234  ost->sync_opts++;
1235  /*
1236  * For video, number of frames in == number of packets out.
1237  * But there may be reordering, so we can't throw away frames on encoder
1238  * flush, we need to limit them here, before they go into encoder.
1239  */
1240  ost->frame_number++;
1241 
1242  if (vstats_filename && frame_size)
1243  do_video_stats(ost, frame_size);
1244  }
1245 
1246  if (!ost->last_frame)
1247  ost->last_frame = av_frame_alloc();
1248  av_frame_unref(ost->last_frame);
1249  if (next_picture && ost->last_frame)
1250  av_frame_ref(ost->last_frame, next_picture);
1251  else
1252  av_frame_free(&ost->last_frame);
1253 }
1254 
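/* Map a mean squared error, already normalized to 0..1 by the callers
   (error / (width * height * 255^2)), to PSNR in dB. */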
1255 static double psnr(double d)
1256 {
1257  return -10.0 * log(d) / log(10.0);
1258 }
1259 
1260 static void do_video_stats(OutputStream *ost, int frame_size)
1261 {
1262  AVCodecContext *enc;
1263  int frame_number;
1264  double ti1, bitrate, avg_bitrate;
1265 
1266  /* this is executed just the first time do_video_stats is called */
1267  if (!vstats_file) {
1268  vstats_file = fopen(vstats_filename, "w");
1269  if (!vstats_file) {
1270  perror("fopen");
1271  exit_program(1);
1272  }
1273  }
1274 
1275  enc = ost->enc_ctx;
1276  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1277  frame_number = ost->st->nb_frames;
1278  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1279  ost->quality / (float)FF_QP2LAMBDA);
1280 
1281  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1282  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1283 
1284  fprintf(vstats_file,"f_size= %6d ", frame_size);
1285  /* compute pts value */
1286  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1287  if (ti1 < 0.01)
1288  ti1 = 0.01;
1289 
1290  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1291  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1292  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1293  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1294  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1295  }
1296 }
1297 
1298 static void finish_output_stream(OutputStream *ost)
1299 {
1300  OutputFile *of = output_files[ost->file_index];
1301  int i;
1302 
1303  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1304 
1305  if (of->shortest) {
1306  for (i = 0; i < of->ctx->nb_streams; i++)
1307  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1308  }
1309 }
1310 
1311 /**
1312  * Get and encode new output from any of the filtergraphs, without causing
1313  * activity.
1314  *
1315  * @return 0 for success, <0 for severe errors
1316  */
1317 static int reap_filters(int flush)
1318 {
1319  AVFrame *filtered_frame = NULL;
1320  int i;
1321 
1322  /* Reap all buffers present in the buffer sinks */
1323  for (i = 0; i < nb_output_streams; i++) {
1324  OutputStream *ost = output_streams[i];
1325  OutputFile *of = output_files[ost->file_index];
1326  AVFilterContext *filter;
1327  AVCodecContext *enc = ost->enc_ctx;
1328  int ret = 0;
1329 
1330  if (!ost->filter)
1331  continue;
1332  filter = ost->filter->filter;
1333 
1334  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1335  return AVERROR(ENOMEM);
1336  }
1337  filtered_frame = ost->filtered_frame;
1338 
1339  while (1) {
1340  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1341  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1342  AV_BUFFERSINK_FLAG_NO_REQUEST);
1343  if (ret < 0) {
1344  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1345  av_log(NULL, AV_LOG_WARNING,
1346  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1347  } else if (flush && ret == AVERROR_EOF) {
1348  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1349  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1350  }
1351  break;
1352  }
1353  if (ost->finished) {
1354  av_frame_unref(filtered_frame);
1355  continue;
1356  }
1357  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1358  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1359  AVRational tb = enc->time_base;
1360  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1361 
1362  tb.den <<= extra_bits;
1363  float_pts =
1364  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1365  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1366  float_pts /= 1 << extra_bits;
1367  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1368  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1369 
1370  filtered_frame->pts =
1371  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1372  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1373  }
1374  //if (ost->source_index >= 0)
1375  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1376 
1377  switch (filter->inputs[0]->type) {
1378  case AVMEDIA_TYPE_VIDEO:
1379  if (!ost->frame_aspect_ratio.num)
1380  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1381 
1382  if (debug_ts) {
1383  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1384  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1385  float_pts,
1386  enc->time_base.num, enc->time_base.den);
1387  }
1388 
1389  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1390  break;
1391  case AVMEDIA_TYPE_AUDIO:
1392  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1393  enc->channels != av_frame_get_channels(filtered_frame)) {
1394  av_log(NULL, AV_LOG_ERROR,
1395  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1396  break;
1397  }
1398  do_audio_out(of->ctx, ost, filtered_frame);
1399  break;
1400  default:
1401  // TODO support subtitle filters
1402  av_assert0(0);
1403  }
1404 
1405  av_frame_unref(filtered_frame);
1406  }
1407  }
1408 
1409  return 0;
1410 }
1411 
1412 static void print_final_stats(int64_t total_size)
1413 {
1414  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1415  uint64_t subtitle_size = 0;
1416  uint64_t data_size = 0;
1417  float percent = -1.0;
1418  int i, j;
1419  int pass1_used = 1;
1420 
1421  for (i = 0; i < nb_output_streams; i++) {
1422  OutputStream *ost = output_streams[i];
1423  switch (ost->enc_ctx->codec_type) {
1424  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1425  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1426  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1427  default: other_size += ost->data_size; break;
1428  }
1429  extra_size += ost->enc_ctx->extradata_size;
1430  data_size += ost->data_size;
1431  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1432  != AV_CODEC_FLAG_PASS1)
1433  pass1_used = 0;
1434  }
1435 
1436  if (data_size && total_size>0 && total_size >= data_size)
1437  percent = 100.0 * (total_size - data_size) / data_size;
1438 
1439  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1440  video_size / 1024.0,
1441  audio_size / 1024.0,
1442  subtitle_size / 1024.0,
1443  other_size / 1024.0,
1444  extra_size / 1024.0);
1445  if (percent >= 0.0)
1446  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1447  else
1448  av_log(NULL, AV_LOG_INFO, "unknown");
1449  av_log(NULL, AV_LOG_INFO, "\n");
1450 
1451  /* print verbose per-stream stats */
1452  for (i = 0; i < nb_input_files; i++) {
1453  InputFile *f = input_files[i];
1454  uint64_t total_packets = 0, total_size = 0;
1455 
1456  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1457  i, f->ctx->filename);
1458 
1459  for (j = 0; j < f->nb_streams; j++) {
1460  InputStream *ist = input_streams[f->ist_index + j];
1461  enum AVMediaType type = ist->dec_ctx->codec_type;
1462 
1463  total_size += ist->data_size;
1464  total_packets += ist->nb_packets;
1465 
1466  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1467  i, j, media_type_string(type));
1468  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1469  ist->nb_packets, ist->data_size);
1470 
1471  if (ist->decoding_needed) {
1472  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1473  ist->frames_decoded);
1474  if (type == AVMEDIA_TYPE_AUDIO)
1475  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1476  av_log(NULL, AV_LOG_VERBOSE, "; ");
1477  }
1478 
1479  av_log(NULL, AV_LOG_VERBOSE, "\n");
1480  }
1481 
1482  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1483  total_packets, total_size);
1484  }
1485 
1486  for (i = 0; i < nb_output_files; i++) {
1487  OutputFile *of = output_files[i];
1488  uint64_t total_packets = 0, total_size = 0;
1489 
1490  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1491  i, of->ctx->filename);
1492 
1493  for (j = 0; j < of->ctx->nb_streams; j++) {
1494  OutputStream *ost = output_streams[of->ost_index + j];
1495  enum AVMediaType type = ost->enc_ctx->codec_type;
1496 
1497  total_size += ost->data_size;
1498  total_packets += ost->packets_written;
1499 
1500  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1501  i, j, media_type_string(type));
1502  if (ost->encoding_needed) {
1503  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1504  ost->frames_encoded);
1505  if (type == AVMEDIA_TYPE_AUDIO)
1506  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1507  av_log(NULL, AV_LOG_VERBOSE, "; ");
1508  }
1509 
1510  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1511  ost->packets_written, ost->data_size);
1512 
1513  av_log(NULL, AV_LOG_VERBOSE, "\n");
1514  }
1515 
1516  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1517  total_packets, total_size);
1518  }
1519  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1520  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1521  if (pass1_used) {
1522  av_log(NULL, AV_LOG_WARNING, "\n");
1523  } else {
1524  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1525  }
1526  }
1527 }
1528 
1529 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1530 {
1531  char buf[1024];
1532  AVBPrint buf_script;
1533  OutputStream *ost;
1534  AVFormatContext *oc;
1535  int64_t total_size;
1536  AVCodecContext *enc;
1537  int frame_number, vid, i;
1538  double bitrate;
1539  int64_t pts = INT64_MIN;
1540  static int64_t last_time = -1;
1541  static int qp_histogram[52];
1542  int hours, mins, secs, us;
1543 
1544  if (!print_stats && !is_last_report && !progress_avio)
1545  return;
1546 
1547  if (!is_last_report) {
1548  if (last_time == -1) {
1549  last_time = cur_time;
1550  return;
1551  }
1552  if ((cur_time - last_time) < 500000)
1553  return;
1554  last_time = cur_time;
1555  }
1556 
1557 
1558  oc = output_files[0]->ctx;
1559 
1560  total_size = avio_size(oc->pb);
1561  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1562  total_size = avio_tell(oc->pb);
1563 
1564  buf[0] = '\0';
1565  vid = 0;
1566  av_bprint_init(&buf_script, 0, 1);
1567  for (i = 0; i < nb_output_streams; i++) {
1568  float q = -1;
1569  ost = output_streams[i];
1570  enc = ost->enc_ctx;
1571  if (!ost->stream_copy)
1572  q = ost->quality / (float) FF_QP2LAMBDA;
1573 
1574  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1575  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1576  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1577  ost->file_index, ost->index, q);
1578  }
1579  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1580  float fps, t = (cur_time-timer_start) / 1000000.0;
1581 
1582  frame_number = ost->frame_number;
1583  fps = t > 1 ? frame_number / t : 0;
1584  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1585  frame_number, fps < 9.95, fps, q);
1586  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1587  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1588  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1589  ost->file_index, ost->index, q);
1590  if (is_last_report)
1591  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1592  if (qp_hist) {
1593  int j;
1594  int qp = lrintf(q);
1595  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1596  qp_histogram[qp]++;
1597  for (j = 0; j < 32; j++)
1598  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1599  }
1600 
1601  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1602  int j;
1603  double error, error_sum = 0;
1604  double scale, scale_sum = 0;
1605  double p;
1606  char type[3] = { 'Y','U','V' };
1607  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1608  for (j = 0; j < 3; j++) {
1609  if (is_last_report) {
1610  error = enc->error[j];
1611  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1612  } else {
1613  error = ost->error[j];
1614  scale = enc->width * enc->height * 255.0 * 255.0;
1615  }
1616  if (j)
1617  scale /= 4;
1618  error_sum += error;
1619  scale_sum += scale;
1620  p = psnr(error / scale);
1621  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1622  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1623  ost->file_index, ost->index, type[j] | 32, p);
1624  }
1625  p = psnr(error_sum / scale_sum);
1626  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1627  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1628  ost->file_index, ost->index, p);
1629  }
1630  vid = 1;
1631  }
1632  /* compute min output value */
1633  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1634  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1635  ost->st->time_base, AV_TIME_BASE_Q));
1636  if (is_last_report)
1637  nb_frames_drop += ost->last_droped;
1638  }
1639 
1640  secs = FFABS(pts) / AV_TIME_BASE;
1641  us = FFABS(pts) % AV_TIME_BASE;
1642  mins = secs / 60;
1643  secs %= 60;
1644  hours = mins / 60;
1645  mins %= 60;
1646 
1647  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1648 
1649  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1650  "size=N/A time=");
1651  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1652  "size=%8.0fkB time=", total_size / 1024.0);
1653  if (pts < 0)
1654  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1655  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1656  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1657  (100 * us) / AV_TIME_BASE);
1658 
1659  if (bitrate < 0) {
1660  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1661  av_bprintf(&buf_script, "bitrate=N/A\n");
1662  }else{
1663  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1664  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1665  }
1666 
1667  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1668  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1669  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1670  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1671  hours, mins, secs, us);
1672 
1673  if (nb_frames_dup || nb_frames_drop)
1674  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1675  nb_frames_dup, nb_frames_drop);
1676  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1677  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1678 
1679  if (print_stats || is_last_report) {
1680  const char end = is_last_report ? '\n' : '\r';
1681  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1682  fprintf(stderr, "%s %c", buf, end);
1683  } else
1684  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1685 
1686  fflush(stderr);
1687  }
1688 
1689  if (progress_avio) {
1690  av_bprintf(&buf_script, "progress=%s\n",
1691  is_last_report ? "end" : "continue");
1692  avio_write(progress_avio, buf_script.str,
1693  FFMIN(buf_script.len, buf_script.size - 1));
1694  avio_flush(progress_avio);
1695  av_bprint_finalize(&buf_script, NULL);
1696  if (is_last_report) {
1697  avio_closep(&progress_avio);
1698  }
1699  }
1700 
1701  if (is_last_report)
1702  print_final_stats(total_size);
1703 }
1704 
1705 static void flush_encoders(void)
1706 {
1707  int i, ret;
1708 
1709  for (i = 0; i < nb_output_streams; i++) {
1710  OutputStream *ost = output_streams[i];
1711  AVCodecContext *enc = ost->enc_ctx;
1712  AVFormatContext *os = output_files[ost->file_index]->ctx;
1713  int stop_encoding = 0;
1714 
1715  if (!ost->encoding_needed)
1716  continue;
1717 
1718  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1719  continue;
1721  continue;
1722 
1723  for (;;) {
1724  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1725  const char *desc;
1726 
1727  switch (enc->codec_type) {
1728  case AVMEDIA_TYPE_AUDIO:
1729  encode = avcodec_encode_audio2;
1730  desc = "Audio";
1731  break;
1732  case AVMEDIA_TYPE_VIDEO:
1733  encode = avcodec_encode_video2;
1734  desc = "Video";
1735  break;
1736  default:
1737  stop_encoding = 1;
1738  }
1739 
1740  if (encode) {
1741  AVPacket pkt;
1742  int pkt_size;
1743  int got_packet;
1744  av_init_packet(&pkt);
1745  pkt.data = NULL;
1746  pkt.size = 0;
1747 
1748  update_benchmark(NULL);
1749  ret = encode(enc, &pkt, NULL, &got_packet);
1750  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1751  if (ret < 0) {
1752  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1753  desc,
1754  av_err2str(ret));
1755  exit_program(1);
1756  }
1757  if (ost->logfile && enc->stats_out) {
1758  fprintf(ost->logfile, "%s", enc->stats_out);
1759  }
1760  if (!got_packet) {
1761  stop_encoding = 1;
1762  break;
1763  }
1764  if (ost->finished & MUXER_FINISHED) {
1765  av_free_packet(&pkt);
1766  continue;
1767  }
1768  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1769  pkt_size = pkt.size;
1770  write_frame(os, &pkt, ost);
1771  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename)
1772  do_video_stats(ost, pkt_size);
1773  }
1774  }
1775 
1776  if (stop_encoding)
1777  break;
1778  }
1779  }
1780 }
1781 
1782 /*
1783  * Check whether a packet from ist should be written into ost at this time
1784  */
1785 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1786 {
1787  OutputFile *of = output_files[ost->file_index];
1788  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1789 
1790  if (ost->source_index != ist_index)
1791  return 0;
1792 
1793  if (ost->finished)
1794  return 0;
1795 
1796  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1797  return 0;
1798 
1799  return 1;
1800 }
1801 
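/* Stream copy: forward a demuxed packet to the output without re-encoding,
   rescaling its timestamps into the output stream timebase and honoring
   -ss/-t boundaries and keyframe constraints. */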
1802 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1803 {
1804  OutputFile *of = output_files[ost->file_index];
1805  InputFile *f = input_files [ist->file_index];
1806  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1807  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1808  int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1809  AVPicture pict;
1810  AVPacket opkt;
1811 
1812  av_init_packet(&opkt);
1813 
1814  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1815  !ost->copy_initial_nonkeyframes)
1816  return;
1817 
1818  if (pkt->pts == AV_NOPTS_VALUE) {
1819  if (!ost->frame_number && ist->pts < start_time &&
1820  !ost->copy_prior_start)
1821  return;
1822  } else {
1823  if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1824  !ost->copy_prior_start)
1825  return;
1826  }
1827 
1828  if (of->recording_time != INT64_MAX &&
1829  ist->pts >= of->recording_time + start_time) {
1830  close_output_stream(ost);
1831  return;
1832  }
1833 
1834  if (f->recording_time != INT64_MAX) {
1835  start_time = f->ctx->start_time;
1836  if (f->start_time != AV_NOPTS_VALUE)
1837  start_time += f->start_time;
1838  if (ist->pts >= f->recording_time + start_time) {
1839  close_output_stream(ost);
1840  return;
1841  }
1842  }
1843 
1844  /* force the input stream PTS */
1845  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1846  ost->sync_opts++;
1847 
1848  if (pkt->pts != AV_NOPTS_VALUE)
1849  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1850  else
1851  opkt.pts = AV_NOPTS_VALUE;
1852 
1853  if (pkt->dts == AV_NOPTS_VALUE)
1854  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1855  else
1856  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1857  opkt.dts -= ost_tb_start_time;
1858 
1859  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1860  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1861  if(!duration)
1862  duration = ist->dec_ctx->frame_size;
1863  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1864  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1865  ost->st->time_base) - ost_tb_start_time;
1866  }
1867 
1868  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1869  opkt.flags = pkt->flags;
1870  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1871  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1872  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1873  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1874  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1875  ) {
1876  int ret = av_parser_change(ost->parser, ost->st->codec,
1877  &opkt.data, &opkt.size,
1878  pkt->data, pkt->size,
1879  pkt->flags & AV_PKT_FLAG_KEY);
1880  if (ret < 0) {
1881  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1882  av_err2str(ret));
1883  exit_program(1);
1884  }
1885  if (ret) {
1886  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1887  if (!opkt.buf)
1888  exit_program(1);
1889  }
1890  } else {
1891  opkt.data = pkt->data;
1892  opkt.size = pkt->size;
1893  }
1894  av_copy_packet_side_data(&opkt, pkt);
1895 
1896  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1897  ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1898  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1899  /* store AVPicture in AVPacket, as expected by the output format */
1900  int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1901  if (ret < 0) {
1902  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1903  av_err2str(ret));
1904  exit_program(1);
1905  }
1906  opkt.data = (uint8_t *)&pict;
1907  opkt.size = sizeof(AVPicture);
1908  opkt.flags |= AV_PKT_FLAG_KEY;
1909  }
1910 
1911  write_frame(of->ctx, &opkt, ost);
1912 }
1913 
1914 int guess_input_channel_layout(InputStream *ist)
1915 {
1916  AVCodecContext *dec = ist->dec_ctx;
1917 
1918  if (!dec->channel_layout) {
1919  char layout_name[256];
1920 
1921  if (dec->channels > ist->guess_layout_max)
1922  return 0;
1923  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1924  if (!dec->channel_layout)
1925  return 0;
1926  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1927  dec->channels, dec->channel_layout);
1928  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1929  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1930  }
1931  return 1;
1932 }
1933 
1934 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1935 {
1936  AVFrame *decoded_frame, *f;
1937  AVCodecContext *avctx = ist->dec_ctx;
1938  int i, ret, err = 0, resample_changed;
1939  AVRational decoded_frame_tb;
1940 
1941  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1942  return AVERROR(ENOMEM);
1943  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1944  return AVERROR(ENOMEM);
1945  decoded_frame = ist->decoded_frame;
1946 
1947  update_benchmark(NULL);
1948  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1949  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1950 
1951  if (ret >= 0 && avctx->sample_rate <= 0) {
1952  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1953  ret = AVERROR_INVALIDDATA;
1954  }
1955 
1956  if (*got_output || ret<0)
1957  decode_error_stat[ret<0] ++;
1958 
1959  if (ret < 0 && exit_on_error)
1960  exit_program(1);
1961 
1962  if (!*got_output || ret < 0)
1963  return ret;
1964 
1965  ist->samples_decoded += decoded_frame->nb_samples;
1966  ist->frames_decoded++;
1967 
1968 #if 1
1969  /* increment next_dts to use for the case where the input stream does not
1970  have timestamps or there are multiple frames in the packet */
1971  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1972  avctx->sample_rate;
1973  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1974  avctx->sample_rate;
1975 #endif
1976 
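 /* Detect a mid-stream change of sample rate, format, channel count or
  * layout; when it happens, every filtergraph fed by this stream is
  * reconfigured below with the new parameters. */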
1977  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1978  ist->resample_channels != avctx->channels ||
1979  ist->resample_channel_layout != decoded_frame->channel_layout ||
1980  ist->resample_sample_rate != decoded_frame->sample_rate;
1981  if (resample_changed) {
1982  char layout1[64], layout2[64];
1983 
1984  if (!guess_input_channel_layout(ist)) {
1985  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1986  "layout for Input Stream #%d.%d\n", ist->file_index,
1987  ist->st->index);
1988  exit_program(1);
1989  }
1990  decoded_frame->channel_layout = avctx->channel_layout;
1991 
1992  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1993  ist->resample_channel_layout);
1994  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1995  decoded_frame->channel_layout);
1996 
1997  av_log(NULL, AV_LOG_DEBUG,
1998  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1999  ist->file_index, ist->st->index,
2000  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2001  ist->resample_channels, layout1,
2002  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2003  avctx->channels, layout2);
2004 
2005  ist->resample_sample_fmt = decoded_frame->format;
2006  ist->resample_sample_rate = decoded_frame->sample_rate;
2007  ist->resample_channel_layout = decoded_frame->channel_layout;
2008  ist->resample_channels = avctx->channels;
2009 
2010  for (i = 0; i < nb_filtergraphs; i++)
2011  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2012  FilterGraph *fg = filtergraphs[i];
2013  if (configure_filtergraph(fg) < 0) {
2014  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2015  exit_program(1);
2016  }
2017  }
2018  }
2019 
2020  /* if the decoder provides a pts, use it instead of the last packet pts.
2021  the decoder could be delaying output by a packet or more. */
2022  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2023  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2024  decoded_frame_tb = avctx->time_base;
2025  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2026  decoded_frame->pts = decoded_frame->pkt_pts;
2027  decoded_frame_tb = ist->st->time_base;
2028  } else if (pkt->pts != AV_NOPTS_VALUE) {
2029  decoded_frame->pts = pkt->pts;
2030  decoded_frame_tb = ist->st->time_base;
2031  }else {
2032  decoded_frame->pts = ist->dts;
2033  decoded_frame_tb = AV_TIME_BASE_Q;
2034  }
2035  pkt->pts = AV_NOPTS_VALUE;
2036  if (decoded_frame->pts != AV_NOPTS_VALUE)
2037  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2038  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2039  (AVRational){1, avctx->sample_rate});
2040  for (i = 0; i < ist->nb_filters; i++) {
2041  if (i < ist->nb_filters - 1) {
2042  f = ist->filter_frame;
2043  err = av_frame_ref(f, decoded_frame);
2044  if (err < 0)
2045  break;
2046  } else
2047  f = decoded_frame;
2048  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2049  AV_BUFFERSRC_FLAG_PUSH);
2050  if (err == AVERROR_EOF)
2051  err = 0; /* ignore */
2052  if (err < 0)
2053  break;
2054  }
2055  decoded_frame->pts = AV_NOPTS_VALUE;
2056 
2057  av_frame_unref(ist->filter_frame);
2058  av_frame_unref(decoded_frame);
2059  return err < 0 ? err : ret;
2060 }
2061 
2062 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2063 {
2064  AVFrame *decoded_frame, *f;
2065  int i, ret = 0, err = 0, resample_changed;
2066  int64_t best_effort_timestamp;
2067  AVRational *frame_sample_aspect;
2068 
2069  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2070  return AVERROR(ENOMEM);
2071  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2072  return AVERROR(ENOMEM);
2073  decoded_frame = ist->decoded_frame;
2074  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2075 
2076  update_benchmark(NULL);
2077  ret = avcodec_decode_video2(ist->dec_ctx,
2078  decoded_frame, got_output, pkt);
2079  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2080 
2081  // The following line may be required in some cases where there is no parser
2082  // or the parser does not set has_b_frames correctly
2083  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2084  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2085  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2086  } else
2087  av_log(ist->dec_ctx, AV_LOG_WARNING,
2088  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2089  "If you want to help, upload a sample "
2090  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2091  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2092  ist->dec_ctx->has_b_frames,
2093  ist->st->codec->has_b_frames);
2094  }
2095 
2096  if (*got_output || ret<0)
2097  decode_error_stat[ret<0] ++;
2098 
2099  if (ret < 0 && exit_on_error)
2100  exit_program(1);
2101 
2102  if (*got_output && ret >= 0) {
2103  if (ist->dec_ctx->width != decoded_frame->width ||
2104  ist->dec_ctx->height != decoded_frame->height ||
2105  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2106  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2107  decoded_frame->width,
2108  decoded_frame->height,
2109  decoded_frame->format,
2110  ist->dec_ctx->width,
2111  ist->dec_ctx->height,
2112  ist->dec_ctx->pix_fmt);
2113  }
2114  }
2115 
2116  if (!*got_output || ret < 0)
2117  return ret;
2118 
2119  if(ist->top_field_first>=0)
2120  decoded_frame->top_field_first = ist->top_field_first;
2121 
2122  ist->frames_decoded++;
2123 
2124  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2125  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2126  if (err < 0)
2127  goto fail;
2128  }
2129  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2130 
2131  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2132  if(best_effort_timestamp != AV_NOPTS_VALUE)
2133  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2134 
2135  if (debug_ts) {
2136  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2137  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2138  ist->st->index, av_ts2str(decoded_frame->pts),
2139  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2140  best_effort_timestamp,
2141  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2142  decoded_frame->key_frame, decoded_frame->pict_type,
2143  ist->st->time_base.num, ist->st->time_base.den);
2144  }
2145 
2146  pkt->size = 0;
2147 
2148  if (ist->st->sample_aspect_ratio.num)
2149  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2150 
2151  resample_changed = ist->resample_width != decoded_frame->width ||
2152  ist->resample_height != decoded_frame->height ||
2153  ist->resample_pix_fmt != decoded_frame->format;
2154  if (resample_changed) {
2155  av_log(NULL, AV_LOG_INFO,
2156  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2157  ist->file_index, ist->st->index,
2158  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2159  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2160 
2161  ist->resample_width = decoded_frame->width;
2162  ist->resample_height = decoded_frame->height;
2163  ist->resample_pix_fmt = decoded_frame->format;
2164 
2165  for (i = 0; i < nb_filtergraphs; i++) {
2166  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2167  configure_filtergraph(filtergraphs[i]) < 0) {
2168  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2169  exit_program(1);
2170  }
2171  }
2172  }
2173 
2174  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2175  for (i = 0; i < ist->nb_filters; i++) {
2176  if (!frame_sample_aspect->num)
2177  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2178 
2179  if (i < ist->nb_filters - 1) {
2180  f = ist->filter_frame;
2181  err = av_frame_ref(f, decoded_frame);
2182  if (err < 0)
2183  break;
2184  } else
2185  f = decoded_frame;
2186  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2187  if (ret == AVERROR_EOF) {
2188  ret = 0; /* ignore */
2189  } else if (ret < 0) {
2190  av_log(NULL, AV_LOG_FATAL,
2191  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2192  exit_program(1);
2193  }
2194  }
2195 
2196 fail:
2197  av_frame_unref(ist->filter_frame);
2198  av_frame_unref(decoded_frame);
2199  return err < 0 ? err : ret;
2200 }
2201 
2202 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2203 {
2204  AVSubtitle subtitle;
2205  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2206  &subtitle, got_output, pkt);
2207 
2208  if (*got_output || ret<0)
2209  decode_error_stat[ret<0] ++;
2210 
2211  if (ret < 0 && exit_on_error)
2212  exit_program(1);
2213 
2214  if (ret < 0 || !*got_output) {
2215  if (!pkt->size)
2216  sub2video_flush(ist);
2217  return ret;
2218  }
2219 
2220  if (ist->fix_sub_duration) {
2221  int end = 1;
2222  if (ist->prev_sub.got_output) {
2223  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2224  1000, AV_TIME_BASE);
2225  if (end < ist->prev_sub.subtitle.end_display_time) {
2226  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2227  "Subtitle duration reduced from %d to %d%s\n",
2228  ist->prev_sub.subtitle.end_display_time, end,
2229  end <= 0 ? ", dropping it" : "");
2230  ist->prev_sub.subtitle.end_display_time = end;
2231  }
2232  }
2233  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2234  FFSWAP(int, ret, ist->prev_sub.ret);
2235  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2236  if (end <= 0)
2237  goto out;
2238  }
2239 
2240  if (!*got_output)
2241  return ret;
2242 
2243  sub2video_update(ist, &subtitle);
2244 
2245  if (!subtitle.num_rects)
2246  goto out;
2247 
2248  ist->frames_decoded++;
2249 
2250  for (i = 0; i < nb_output_streams; i++) {
2251  OutputStream *ost = output_streams[i];
2252 
2253  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2254  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2255  continue;
2256 
2257  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2258  }
2259 
2260 out:
2261  avsubtitle_free(&subtitle);
2262  return ret;
2263 }
2264 
2265 static int send_filter_eof(InputStream *ist)
2266 {
2267  int i, ret;
2268  for (i = 0; i < ist->nb_filters; i++) {
2269  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2270  if (ret < 0)
2271  return ret;
2272  }
2273  return 0;
2274 }
2275 
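 /* Decode one packet of this stream (or forward it for stream copy).
  * Returns whether the decoder produced output, which the EOF path uses to
  * keep flushing until the decoder is drained. */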
2276 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2277 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2278 {
2279  int ret = 0, i;
2280  int got_output = 0;
2281 
2282  AVPacket avpkt;
2283  if (!ist->saw_first_ts) {
2284  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2285  ist->pts = 0;
2286  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2287  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2288  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2289  }
2290  ist->saw_first_ts = 1;
2291  }
2292 
2293  if (ist->next_dts == AV_NOPTS_VALUE)
2294  ist->next_dts = ist->dts;
2295  if (ist->next_pts == AV_NOPTS_VALUE)
2296  ist->next_pts = ist->pts;
2297 
2298  if (!pkt) {
2299  /* EOF handling */
2300  av_init_packet(&avpkt);
2301  avpkt.data = NULL;
2302  avpkt.size = 0;
2303  goto handle_eof;
2304  } else {
2305  avpkt = *pkt;
2306  }
2307 
2308  if (pkt->dts != AV_NOPTS_VALUE) {
2309  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2310  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2311  ist->next_pts = ist->pts = ist->dts;
2312  }
2313 
2314  // while we have more to decode or while the decoder did output something on EOF
2315  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2316  int duration;
2317  handle_eof:
2318 
2319  ist->pts = ist->next_pts;
2320  ist->dts = ist->next_dts;
2321 
2322  if (avpkt.size && avpkt.size != pkt->size &&
2323  !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2324  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2325  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2326  ist->showed_multi_packet_warning = 1;
2327  }
2328 
2329  switch (ist->dec_ctx->codec_type) {
2330  case AVMEDIA_TYPE_AUDIO:
2331  ret = decode_audio (ist, &avpkt, &got_output);
2332  break;
2333  case AVMEDIA_TYPE_VIDEO:
2334  ret = decode_video (ist, &avpkt, &got_output);
2335  if (avpkt.duration) {
2336  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2337  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2338  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2339  duration = ((int64_t)AV_TIME_BASE *
2340  ist->dec_ctx->framerate.den * ticks) /
2341  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2342  } else
2343  duration = 0;
2344 
2345  if(ist->dts != AV_NOPTS_VALUE && duration) {
2346  ist->next_dts += duration;
2347  }else
2348  ist->next_dts = AV_NOPTS_VALUE;
2349 
2350  if (got_output)
2351  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2352  break;
2353  case AVMEDIA_TYPE_SUBTITLE:
2354  ret = transcode_subtitles(ist, &avpkt, &got_output);
2355  break;
2356  default:
2357  return -1;
2358  }
2359 
2360  if (ret < 0) {
2361  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2362  ist->file_index, ist->st->index, av_err2str(ret));
2363  if (exit_on_error)
2364  exit_program(1);
2365  break;
2366  }
2367 
2368  avpkt.dts=
2369  avpkt.pts= AV_NOPTS_VALUE;
2370 
2371  // touch data and size only if not EOF
2372  if (pkt) {
2373  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2374  ret = avpkt.size;
2375  avpkt.data += ret;
2376  avpkt.size -= ret;
2377  }
2378  if (!got_output) {
2379  continue;
2380  }
2381  if (got_output && !pkt)
2382  break;
2383  }
2384 
2385  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2386  if (!pkt && ist->decoding_needed && !got_output) {
2387  int ret = send_filter_eof(ist);
2388  if (ret < 0) {
2389  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2390  exit_program(1);
2391  }
2392  }
2393 
2394  /* handle stream copy */
2395  if (!ist->decoding_needed) {
2396  ist->dts = ist->next_dts;
2397  switch (ist->dec_ctx->codec_type) {
2398  case AVMEDIA_TYPE_AUDIO:
2399  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2400  ist->dec_ctx->sample_rate;
2401  break;
2402  case AVMEDIA_TYPE_VIDEO:
2403  if (ist->framerate.num) {
2404  // TODO: Remove work-around for c99-to-c89 issue 7
2405  AVRational time_base_q = AV_TIME_BASE_Q;
2406  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2407  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2408  } else if (pkt->duration) {
2409  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2410  } else if(ist->dec_ctx->framerate.num != 0) {
2411  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2412  ist->next_dts += ((int64_t)AV_TIME_BASE *
2413  ist->dec_ctx->framerate.den * ticks) /
2414  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2415  }
2416  break;
2417  }
2418  ist->pts = ist->dts;
2419  ist->next_pts = ist->next_dts;
2420  }
2421  for (i = 0; pkt && i < nb_output_streams; i++) {
2422  OutputStream *ost = output_streams[i];
2423 
2424  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2425  continue;
2426 
2427  do_streamcopy(ist, ost, pkt);
2428  }
2429 
2430  return got_output;
2431 }
2432 
2433 static void print_sdp(void)
2434 {
2435  char sdp[16384];
2436  int i;
2437  int j;
2438  AVIOContext *sdp_pb;
2439  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2440 
2441  if (!avc)
2442  exit_program(1);
2443  for (i = 0, j = 0; i < nb_output_files; i++) {
2444  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2445  avc[j] = output_files[i]->ctx;
2446  j++;
2447  }
2448  }
2449 
2450  av_sdp_create(avc, j, sdp, sizeof(sdp));
2451 
2452  if (!sdp_filename) {
2453  printf("SDP:\n%s\n", sdp);
2454  fflush(stdout);
2455  } else {
2456  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2457  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2458  } else {
2459  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2460  avio_closep(&sdp_pb);
2461  av_freep(&sdp_filename);
2462  }
2463  }
2464 
2465  av_freep(&avc);
2466 }
2467 
2468 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2469 {
2470  int i;
2471  for (i = 0; hwaccels[i].name; i++)
2472  if (hwaccels[i].pix_fmt == pix_fmt)
2473  return &hwaccels[i];
2474  return NULL;
2475 }
2476 
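 /* AVCodecContext.get_format callback: walk the pixel formats offered by the
  * decoder, initializing a matching hwaccel if one was requested, otherwise
  * falling back to the first software format in the list. */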
2477 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2478 {
2479  InputStream *ist = s->opaque;
2480  const enum AVPixelFormat *p;
2481  int ret;
2482 
2483  for (p = pix_fmts; *p != -1; p++) {
2484  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2485  const HWAccel *hwaccel;
2486 
2487  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2488  break;
2489 
2490  hwaccel = get_hwaccel(*p);
2491  if (!hwaccel ||
2492  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2493  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2494  continue;
2495 
2496  ret = hwaccel->init(s);
2497  if (ret < 0) {
2498  if (ist->hwaccel_id == hwaccel->id) {
2499  av_log(NULL, AV_LOG_FATAL,
2500  "%s hwaccel requested for input stream #%d:%d, "
2501  "but cannot be initialized.\n", hwaccel->name,
2502  ist->file_index, ist->st->index);
2503  return AV_PIX_FMT_NONE;
2504  }
2505  continue;
2506  }
2507  ist->active_hwaccel_id = hwaccel->id;
2508  ist->hwaccel_pix_fmt = *p;
2509  break;
2510  }
2511 
2512  return *p;
2513 }
2514 
2515 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2516 {
2517  InputStream *ist = s->opaque;
2518 
2519  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2520  return ist->hwaccel_get_buffer(s, frame, flags);
2521 
2522  return avcodec_default_get_buffer2(s, frame, flags);
2523 }
2524 
2525 static int init_input_stream(int ist_index, char *error, int error_len)
2526 {
2527  int ret;
2528  InputStream *ist = input_streams[ist_index];
2529 
2530  if (ist->decoding_needed) {
2531  AVCodec *codec = ist->dec;
2532  if (!codec) {
2533  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2534  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2535  return AVERROR(EINVAL);
2536  }
2537 
2538  ist->dec_ctx->opaque = ist;
2539  ist->dec_ctx->get_format = get_format;
2540  ist->dec_ctx->get_buffer2 = get_buffer;
2541  ist->dec_ctx->thread_safe_callbacks = 1;
2542 
2543  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2544  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2545  (ist->decoding_needed & DECODING_FOR_OST)) {
2546  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2547  if (ist->decoding_needed & DECODING_FOR_FILTER)
2548  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2549  }
2550 
2551  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2552  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2553  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2554  if (ret == AVERROR_EXPERIMENTAL)
2555  abort_codec_experimental(codec, 0);
2556 
2557  snprintf(error, error_len,
2558  "Error while opening decoder for input stream "
2559  "#%d:%d : %s",
2560  ist->file_index, ist->st->index, av_err2str(ret));
2561  return ret;
2562  }
2563  assert_avoptions(ist->decoder_opts);
2564  }
2565 
2566  ist->next_pts = AV_NOPTS_VALUE;
2567  ist->next_dts = AV_NOPTS_VALUE;
2568 
2569  return 0;
2570 }
2571 
2572 static InputStream *get_input_stream(OutputStream *ost)
2573 {
2574  if (ost->source_index >= 0)
2575  return input_streams[ost->source_index];
2576  return NULL;
2577 }
2578 
2579 static int compare_int64(const void *a, const void *b)
2580 {
2581  int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2582  return va < vb ? -1 : va > vb ? +1 : 0;
2583 }
2584 
2585 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2586 {
2587  int ret = 0;
2588 
2589  if (ost->encoding_needed) {
2590  AVCodec *codec = ost->enc;
2591  AVCodecContext *dec = NULL;
2592  InputStream *ist;
2593 
2594  if ((ist = get_input_stream(ost)))
2595  dec = ist->dec_ctx;
2596  if (dec && dec->subtitle_header) {
2597  /* ASS code assumes this buffer is null terminated so add extra byte. */
2598  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2599  if (!ost->enc_ctx->subtitle_header)
2600  return AVERROR(ENOMEM);
2601  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2602  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2603  }
2604  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2605  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2606  av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
2607  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2608  !codec->defaults &&
2609  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2610  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2611  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2612 
2613  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2614  if (ret == AVERROR_EXPERIMENTAL)
2615  abort_codec_experimental(codec, 1);
2616  snprintf(error, error_len,
2617  "Error while opening encoder for output stream #%d:%d - "
2618  "maybe incorrect parameters such as bit_rate, rate, width or height",
2619  ost->file_index, ost->index);
2620  return ret;
2621  }
2622  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2623  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2624  av_buffersink_set_frame_size(ost->filter->filter,
2625  ost->enc_ctx->frame_size);
2627  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2628  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2629  " It takes bits/s as argument, not kbits/s\n");
2630 
2631  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2632  if (ret < 0) {
2633  av_log(NULL, AV_LOG_FATAL,
2634  "Error initializing the output stream codec context.\n");
2635  exit_program(1);
2636  }
2637 
2638  // copy timebase while removing common factors
2639  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2640  ost->st->codec->codec= ost->enc_ctx->codec;
2641  } else {
2642  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2643  if (ret < 0) {
2644  av_log(NULL, AV_LOG_FATAL,
2645  "Error setting up codec context options.\n");
2646  return ret;
2647  }
2648  // copy timebase while removing common factors
2649  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2650  }
2651 
2652  return ret;
2653 }
2654 
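 /* Parse the -force_key_frames argument: a comma-separated list of times;
  * an entry starting with "chapters" expands to all chapter start times,
  * optionally shifted by the delta that follows (e.g. "chapters-0.1"). */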
2655 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2656  AVCodecContext *avctx)
2657 {
2658  char *p;
2659  int n = 1, i, size, index = 0;
2660  int64_t t, *pts;
2661 
2662  for (p = kf; *p; p++)
2663  if (*p == ',')
2664  n++;
2665  size = n;
2666  pts = av_malloc_array(size, sizeof(*pts));
2667  if (!pts) {
2668  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2669  exit_program(1);
2670  }
2671 
2672  p = kf;
2673  for (i = 0; i < n; i++) {
2674  char *next = strchr(p, ',');
2675 
2676  if (next)
2677  *next++ = 0;
2678 
2679  if (!memcmp(p, "chapters", 8)) {
2680 
2681  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2682  int j;
2683 
2684  if (avf->nb_chapters > INT_MAX - size ||
2685  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2686  sizeof(*pts)))) {
2687  av_log(NULL, AV_LOG_FATAL,
2688  "Could not allocate forced key frames array.\n");
2689  exit_program(1);
2690  }
2691  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2692  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2693 
2694  for (j = 0; j < avf->nb_chapters; j++) {
2695  AVChapter *c = avf->chapters[j];
2696  av_assert1(index < size);
2697  pts[index++] = av_rescale_q(c->start, c->time_base,
2698  avctx->time_base) + t;
2699  }
2700 
2701  } else {
2702 
2703  t = parse_time_or_die("force_key_frames", p, 1);
2704  av_assert1(index < size);
2705  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2706 
2707  }
2708 
2709  p = next;
2710  }
2711 
2712  av_assert0(index == size);
2713  qsort(pts, size, sizeof(*pts), compare_int64);
2714  ost->forced_kf_count = size;
2715  ost->forced_kf_pts = pts;
2716 }
2717 
2718 static void report_new_stream(int input_index, AVPacket *pkt)
2719 {
2720  InputFile *file = input_files[input_index];
2721  AVStream *st = file->ctx->streams[pkt->stream_index];
2722 
2723  if (pkt->stream_index < file->nb_streams_warn)
2724  return;
2725  av_log(file->ctx, AV_LOG_WARNING,
2726  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2727  av_get_media_type_string(st->codec->codec_type),
2728  input_index, pkt->stream_index,
2729  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2730  file->nb_streams_warn = pkt->stream_index + 1;
2731 }
2732 
2733 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2734 {
2735  AVDictionaryEntry *e;
2736 
2737  uint8_t *encoder_string;
2738  int encoder_string_len;
2739  int format_flags = 0;
2740  int codec_flags = 0;
2741 
2742  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2743  return;
2744 
2745  e = av_dict_get(of->opts, "fflags", NULL, 0);
2746  if (e) {
2747  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2748  if (!o)
2749  return;
2750  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2751  }
2752  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2753  if (e) {
2754  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2755  if (!o)
2756  return;
2757  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2758  }
2759 
2760  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2761  encoder_string = av_mallocz(encoder_string_len);
2762  if (!encoder_string)
2763  exit_program(1);
2764 
2765  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2766  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2767  else
2768  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2769  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2770  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2771  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2772 }
2773 
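 /* One-time setup before the main transcoding loop: fill in the encoding
  * parameters for every output stream (or copy them for stream copy), open
  * decoders and encoders, write the output headers and print the mapping. */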
2774 static int transcode_init(void)
2775 {
2776  int ret = 0, i, j, k;
2777  AVFormatContext *oc;
2778  OutputStream *ost;
2779  InputStream *ist;
2780  char error[1024] = {0};
2781  int want_sdp = 1;
2782 
2783  for (i = 0; i < nb_filtergraphs; i++) {
2784  FilterGraph *fg = filtergraphs[i];
2785  for (j = 0; j < fg->nb_outputs; j++) {
2786  OutputFilter *ofilter = fg->outputs[j];
2787  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2788  continue;
2789  if (fg->nb_inputs != 1)
2790  continue;
2791  for (k = nb_input_streams-1; k >= 0 ; k--)
2792  if (fg->inputs[0]->ist == input_streams[k])
2793  break;
2794  ofilter->ost->source_index = k;
2795  }
2796  }
2797 
2798  /* init framerate emulation */
2799  for (i = 0; i < nb_input_files; i++) {
2800  InputFile *ifile = input_files[i];
2801  if (ifile->rate_emu)
2802  for (j = 0; j < ifile->nb_streams; j++)
2803  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2804  }
2805 
2806  /* for each output stream, we compute the right encoding parameters */
2807  for (i = 0; i < nb_output_streams; i++) {
2808  AVCodecContext *enc_ctx;
2809  AVCodecContext *dec_ctx = NULL;
2810  ost = output_streams[i];
2811  oc = output_files[ost->file_index]->ctx;
2812  ist = get_input_stream(ost);
2813 
2814  if (ost->attachment_filename)
2815  continue;
2816 
2817  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2818 
2819  if (ist) {
2820  dec_ctx = ist->dec_ctx;
2821 
2822  ost->st->disposition = ist->st->disposition;
2823  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2824  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2825  } else {
2826  for (j=0; j<oc->nb_streams; j++) {
2827  AVStream *st = oc->streams[j];
2828  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2829  break;
2830  }
2831  if (j == oc->nb_streams)
2832  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2834  }
2835 
2836  if (ost->stream_copy) {
2837  AVRational sar;
2838  uint64_t extra_size;
2839 
2840  av_assert0(ist && !ost->filter);
2841 
2842  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2843 
2844  if (extra_size > INT_MAX) {
2845  return AVERROR(EINVAL);
2846  }
2847 
2848  /* if stream_copy is selected, no need to decode or encode */
2849  enc_ctx->codec_id = dec_ctx->codec_id;
2850  enc_ctx->codec_type = dec_ctx->codec_type;
2851 
2852  if (!enc_ctx->codec_tag) {
2853  unsigned int codec_tag;
2854  if (!oc->oformat->codec_tag ||
2855  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2856  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2857  enc_ctx->codec_tag = dec_ctx->codec_tag;
2858  }
2859 
2860  enc_ctx->bit_rate = dec_ctx->bit_rate;
2861  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2862  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2863  enc_ctx->field_order = dec_ctx->field_order;
2864  if (dec_ctx->extradata_size) {
2865  enc_ctx->extradata = av_mallocz(extra_size);
2866  if (!enc_ctx->extradata) {
2867  return AVERROR(ENOMEM);
2868  }
2869  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2870  }
2871  enc_ctx->extradata_size= dec_ctx->extradata_size;
2872  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2873 
2874  enc_ctx->time_base = ist->st->time_base;
2875  /*
2876  * Avi is a special case here because it supports variable fps but
2877  * having the fps and timebase differ significantly adds quite some
2878  * overhead
2879  */
2880  if(!strcmp(oc->oformat->name, "avi")) {
2881  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2882  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2883  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2884  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2885  || copy_tb==2){
2886  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2887  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2888  enc_ctx->ticks_per_frame = 2;
2889  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2890  && av_q2d(ist->st->time_base) < 1.0/500
2891  || copy_tb==0){
2892  enc_ctx->time_base = dec_ctx->time_base;
2893  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2894  enc_ctx->time_base.den *= 2;
2895  enc_ctx->ticks_per_frame = 2;
2896  }
2897  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2898  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2899  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2900  && strcmp(oc->oformat->name, "f4v")
2901  ) {
2902  if( copy_tb<0 && dec_ctx->time_base.den
2903  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2904  && av_q2d(ist->st->time_base) < 1.0/500
2905  || copy_tb==0){
2906  enc_ctx->time_base = dec_ctx->time_base;
2907  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2908  }
2909  }
2910  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2911  && dec_ctx->time_base.num < dec_ctx->time_base.den
2912  && dec_ctx->time_base.num > 0
2913  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2914  enc_ctx->time_base = dec_ctx->time_base;
2915  }
2916 
2917  if (!ost->frame_rate.num)
2918  ost->frame_rate = ist->framerate;
2919  if(ost->frame_rate.num)
2920  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2921 
2922  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2923  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2924 
2925  if (ist->st->nb_side_data) {
2926  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2927  sizeof(*ist->st->side_data));
2928  if (!ost->st->side_data)
2929  return AVERROR(ENOMEM);
2930 
2931  ost->st->nb_side_data = 0;
2932  for (j = 0; j < ist->st->nb_side_data; j++) {
2933  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2934  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2935 
2936  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2937  continue;
2938 
2939  sd_dst->data = av_malloc(sd_src->size);
2940  if (!sd_dst->data)
2941  return AVERROR(ENOMEM);
2942  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2943  sd_dst->size = sd_src->size;
2944  sd_dst->type = sd_src->type;
2945  ost->st->nb_side_data++;
2946  }
2947  }
2948 
2949  ost->parser = av_parser_init(enc_ctx->codec_id);
2950 
2951  switch (enc_ctx->codec_type) {
2952  case AVMEDIA_TYPE_AUDIO:
2953  if (audio_volume != 256) {
2954  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2955  exit_program(1);
2956  }
2957  enc_ctx->channel_layout = dec_ctx->channel_layout;
2958  enc_ctx->sample_rate = dec_ctx->sample_rate;
2959  enc_ctx->channels = dec_ctx->channels;
2960  enc_ctx->frame_size = dec_ctx->frame_size;
2961  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2962  enc_ctx->block_align = dec_ctx->block_align;
2963  enc_ctx->initial_padding = dec_ctx->delay;
2964 #if FF_API_AUDIOENC_DELAY
2965  enc_ctx->delay = dec_ctx->delay;
2966 #endif
2967  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2968  enc_ctx->block_align= 0;
2969  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2970  enc_ctx->block_align= 0;
2971  break;
2972  case AVMEDIA_TYPE_VIDEO:
2973  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2974  enc_ctx->width = dec_ctx->width;
2975  enc_ctx->height = dec_ctx->height;
2976  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2977  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2978  sar =
2979  av_mul_q(ost->frame_aspect_ratio,
2980  (AVRational){ enc_ctx->height, enc_ctx->width });
2981  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2982  "with stream copy may produce invalid files\n");
2983  }
2984  else if (ist->st->sample_aspect_ratio.num)
2985  sar = ist->st->sample_aspect_ratio;
2986  else
2987  sar = dec_ctx->sample_aspect_ratio;
2988  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2989  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2990  ost->st->r_frame_rate = ist->st->r_frame_rate;
2991  break;
2992  case AVMEDIA_TYPE_SUBTITLE:
2993  enc_ctx->width = dec_ctx->width;
2994  enc_ctx->height = dec_ctx->height;
2995  break;
2996  case AVMEDIA_TYPE_UNKNOWN:
2997  case AVMEDIA_TYPE_DATA:
2998  case AVMEDIA_TYPE_ATTACHMENT:
2999  break;
3000  default:
3001  abort();
3002  }
3003  } else {
3004  if (!ost->enc)
3005  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3006  if (!ost->enc) {
3007  /* should only happen when a default codec is not present. */
3008  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3009  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3010  ret = AVERROR(EINVAL);
3011  goto dump_format;
3012  }
3013 
3014  set_encoder_id(output_files[ost->file_index], ost);
3015 
3016  if (!ost->filter &&
3017  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3018  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3019  FilterGraph *fg;
3020  fg = init_simple_filtergraph(ist, ost);
3021  if (configure_filtergraph(fg)) {
3022  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3023  exit_program(1);
3024  }
3025  }
3026 
3027  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3028  if (!ost->frame_rate.num)
3029  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3030  if (ist && !ost->frame_rate.num)
3031  ost->frame_rate = ist->framerate;
3032  if (ist && !ost->frame_rate.num)
3033  ost->frame_rate = ist->st->r_frame_rate;
3034  if (ist && !ost->frame_rate.num) {
3035  ost->frame_rate = (AVRational){25, 1};
3036  av_log(NULL, AV_LOG_WARNING,
3037  "No information "
3038  "about the input framerate is available. Falling "
3039  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3040  "if you want a different framerate.\n",
3041  ost->file_index, ost->index);
3042  }
3043 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3044  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3045  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3046  ost->frame_rate = ost->enc->supported_framerates[idx];
3047  }
3048  // reduce frame rate for mpeg4 to be within the spec limits
3049  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3050  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3051  ost->frame_rate.num, ost->frame_rate.den, 65535);
3052  }
3053  }
3054 
3055  switch (enc_ctx->codec_type) {
3056  case AVMEDIA_TYPE_AUDIO:
3057  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3058  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3059  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3060  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3061  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3062  break;
3063  case AVMEDIA_TYPE_VIDEO:
3064  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3065  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3066  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3067  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3068  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3069  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3070  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3071  }
3072  for (j = 0; j < ost->forced_kf_count; j++)
3073  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3074  AV_TIME_BASE_Q,
3075  enc_ctx->time_base);
3076 
3077  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3078  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3079  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3080  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3081  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3082  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3083  if (!strncmp(ost->enc->name, "libx264", 7) &&
3084  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3085  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3086  av_log(NULL, AV_LOG_WARNING,
3087  "No pixel format specified, %s for H.264 encoding chosen.\n"
3088  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3089  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3090  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3091  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3092  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3093  av_log(NULL, AV_LOG_WARNING,
3094  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3095  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3096  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3097  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3098 
3099  ost->st->avg_frame_rate = ost->frame_rate;
3100 
3101  if (!dec_ctx ||
3102  enc_ctx->width != dec_ctx->width ||
3103  enc_ctx->height != dec_ctx->height ||
3104  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3105  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3106  }
3107 
3108  if (ost->forced_keyframes) {
3109  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3110  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3111  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3112  if (ret < 0) {
3113  av_log(NULL, AV_LOG_ERROR,
3114  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3115  return ret;
3116  }
3117  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3118  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3119  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3120  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3121 
3122  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3123  // parse it only for static kf timings
3124  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3125  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3126  }
3127  }
3128  break;
3129  case AVMEDIA_TYPE_SUBTITLE:
3130  enc_ctx->time_base = (AVRational){1, 1000};
3131  if (!enc_ctx->width) {
3132  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3133  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3134  }
3135  break;
3136  case AVMEDIA_TYPE_DATA:
3137  break;
3138  default:
3139  abort();
3140  break;
3141  }
3142  }
3143 
3144  if (ost->disposition) {
3145  static const AVOption opts[] = {
3146  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3147  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3148  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3149  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3150  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3151  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3152  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3153  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3154  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3155  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3156  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3157  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3158  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3159  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3160  { NULL },
3161  };
3162  static const AVClass class = {
3163  .class_name = "",
3164  .item_name = av_default_item_name,
3165  .option = opts,
3166  .version = LIBAVUTIL_VERSION_INT,
3167  };
3168  const AVClass *pclass = &class;
3169 
3170  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3171  if (ret < 0)
3172  goto dump_format;
3173  }
3174  }
3175 
3176  /* open each encoder */
3177  for (i = 0; i < nb_output_streams; i++) {
3178  ret = init_output_stream(output_streams[i], error, sizeof(error));
3179  if (ret < 0)
3180  goto dump_format;
3181  }
3182 
3183  /* init input streams */
3184  for (i = 0; i < nb_input_streams; i++)
3185  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3186  for (i = 0; i < nb_output_streams; i++) {
3187  ost = output_streams[i];
3188  avcodec_close(ost->enc_ctx);
3189  }
3190  goto dump_format;
3191  }
3192 
3193  /* discard unused programs */
3194  for (i = 0; i < nb_input_files; i++) {
3195  InputFile *ifile = input_files[i];
3196  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3197  AVProgram *p = ifile->ctx->programs[j];
3198  int discard = AVDISCARD_ALL;
3199 
3200  for (k = 0; k < p->nb_stream_indexes; k++)
3201  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3202  discard = AVDISCARD_DEFAULT;
3203  break;
3204  }
3205  p->discard = discard;
3206  }
3207  }
3208 
3209  /* open files and write file headers */
3210  for (i = 0; i < nb_output_files; i++) {
3211  oc = output_files[i]->ctx;
3212  oc->interrupt_callback = int_cb;
3213  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3214  snprintf(error, sizeof(error),
3215  "Could not write header for output file #%d "
3216  "(incorrect codec parameters ?): %s",
3217  i, av_err2str(ret));
3218  ret = AVERROR(EINVAL);
3219  goto dump_format;
3220  }
3221 // assert_avoptions(output_files[i]->opts);
3222  if (strcmp(oc->oformat->name, "rtp")) {
3223  want_sdp = 0;
3224  }
3225  }
3226 
3227  dump_format:
3228  /* dump the file output parameters - cannot be done before in case
3229  of stream copy */
3230  for (i = 0; i < nb_output_files; i++) {
3231  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3232  }
3233 
3234  /* dump the stream mapping */
3235  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3236  for (i = 0; i < nb_input_streams; i++) {
3237  ist = input_streams[i];
3238 
3239  for (j = 0; j < ist->nb_filters; j++) {
3240  if (ist->filters[j]->graph->graph_desc) {
3241  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3242  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3243  ist->filters[j]->name);
3244  if (nb_filtergraphs > 1)
3245  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3246  av_log(NULL, AV_LOG_INFO, "\n");
3247  }
3248  }
3249  }
3250 
3251  for (i = 0; i < nb_output_streams; i++) {
3252  ost = output_streams[i];
3253 
3254  if (ost->attachment_filename) {
3255  /* an attached file */
3256  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3257  ost->attachment_filename, ost->file_index, ost->index);
3258  continue;
3259  }
3260 
3261  if (ost->filter && ost->filter->graph->graph_desc) {
3262  /* output from a complex graph */
3263  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3264  if (nb_filtergraphs > 1)
3265  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3266 
3267  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3268  ost->index, ost->enc ? ost->enc->name : "?");
3269  continue;
3270  }
3271 
3272  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3273  input_streams[ost->source_index]->file_index,
3274  input_streams[ost->source_index]->st->index,
3275  ost->file_index,
3276  ost->index);
3277  if (ost->sync_ist != input_streams[ost->source_index])
3278  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3279  ost->sync_ist->file_index,
3280  ost->sync_ist->st->index);
3281  if (ost->stream_copy)
3282  av_log(NULL, AV_LOG_INFO, " (copy)");
3283  else {
3284  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3285  const AVCodec *out_codec = ost->enc;
3286  const char *decoder_name = "?";
3287  const char *in_codec_name = "?";
3288  const char *encoder_name = "?";
3289  const char *out_codec_name = "?";
3290  const AVCodecDescriptor *desc;
3291 
3292  if (in_codec) {
3293  decoder_name = in_codec->name;
3294  desc = avcodec_descriptor_get(in_codec->id);
3295  if (desc)
3296  in_codec_name = desc->name;
3297  if (!strcmp(decoder_name, in_codec_name))
3298  decoder_name = "native";
3299  }
3300 
3301  if (out_codec) {
3302  encoder_name = out_codec->name;
3303  desc = avcodec_descriptor_get(out_codec->id);
3304  if (desc)
3305  out_codec_name = desc->name;
3306  if (!strcmp(encoder_name, out_codec_name))
3307  encoder_name = "native";
3308  }
3309 
3310  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3311  in_codec_name, decoder_name,
3312  out_codec_name, encoder_name);
3313  }
3314  av_log(NULL, AV_LOG_INFO, "\n");
3315  }
3316 
3317  if (ret) {
3318  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3319  return ret;
3320  }
3321 
3322  if (sdp_filename || want_sdp) {
3323  print_sdp();
3324  }
3325 
3326  transcode_init_done = 1;
3327 
3328  return 0;
3329 }
3330 
3331 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3332 static int need_output(void)
3333 {
3334  int i;
3335 
3336  for (i = 0; i < nb_output_streams; i++) {
3337  OutputStream *ost = output_streams[i];
3338  OutputFile *of = output_files[ost->file_index];
3339  AVFormatContext *os = output_files[ost->file_index]->ctx;
3340 
3341  if (ost->finished ||
3342  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3343  continue;
3344  if (ost->frame_number >= ost->max_frames) {
3345  int j;
3346  for (j = 0; j < of->ctx->nb_streams; j++)
3347  close_output_stream(output_streams[of->ost_index + j]);
3348  continue;
3349  }
3350 
3351  return 1;
3352  }
3353 
3354  return 0;
3355 }
3356 
3357 /**
3358  * Select the output stream to process.
3359  *
3360  * @return selected output stream, or NULL if none available
3361  */
3362 static OutputStream *choose_output(void)
3363 {
3364  int i;
3365  int64_t opts_min = INT64_MAX;
3366  OutputStream *ost_min = NULL;
3367 
3368  for (i = 0; i < nb_output_streams; i++) {
3369  OutputStream *ost = output_streams[i];
3370  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3371  AV_TIME_BASE_Q);
3372  if (!ost->finished && opts < opts_min) {
3373  opts_min = opts;
3374  ost_min = ost->unavailable ? NULL : ost;
3375  }
3376  }
3377  return ost_min;
3378 }
3379 
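 /* Poll the console (at most every 100 ms) for interactive commands:
  * 'q' quits, '+'/'-' change verbosity, 'c'/'C' send filtergraph commands,
  * 'd'/'D' set codec debug flags, '?' lists all keys. */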
3380 static int check_keyboard_interaction(int64_t cur_time)
3381 {
3382  int i, ret, key;
3383  static int64_t last_time;
3384  if (received_nb_signals)
3385  return AVERROR_EXIT;
3386  /* read_key() returns 0 on EOF */
3387  if(cur_time - last_time >= 100000 && !run_as_daemon){
3388  key = read_key();
3389  last_time = cur_time;
3390  }else
3391  key = -1;
3392  if (key == 'q')
3393  return AVERROR_EXIT;
3394  if (key == '+') av_log_set_level(av_log_get_level()+10);
3395  if (key == '-') av_log_set_level(av_log_get_level()-10);
3396  if (key == 's') qp_hist ^= 1;
3397  if (key == 'h'){
3398  if (do_hex_dump){
3399  do_hex_dump = do_pkt_dump = 0;
3400  } else if(do_pkt_dump){
3401  do_hex_dump = 1;
3402  } else
3403  do_pkt_dump = 1;
3404  av_log_set_level(AV_LOG_DEBUG);
3405  }
3406  if (key == 'c' || key == 'C'){
3407  char buf[4096], target[64], command[256], arg[256] = {0};
3408  double time;
3409  int k, n = 0;
3410  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3411  i = 0;
3412  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3413  if (k > 0)
3414  buf[i++] = k;
3415  buf[i] = 0;
3416  if (k > 0 &&
3417  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3418  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3419  target, time, command, arg);
3420  for (i = 0; i < nb_filtergraphs; i++) {
3421  FilterGraph *fg = filtergraphs[i];
3422  if (fg->graph) {
3423  if (time < 0) {
3424  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3425  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3426  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3427  } else if (key == 'c') {
3428  fprintf(stderr, "Queueing commands only on filters supporting the specific command is unsupported\n");
3429  ret = AVERROR_PATCHWELCOME;
3430  } else {
3431  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3432  if (ret < 0)
3433  fprintf(stderr, "Queueing command failed with error %s\n", av_err2str(ret));
3434  }
3435  }
3436  }
3437  } else {
3438  av_log(NULL, AV_LOG_ERROR,
3439  "Parse error, at least 3 arguments were expected, "
3440  "only %d given in string '%s'\n", n, buf);
3441  }
3442  }
3443  if (key == 'd' || key == 'D'){
3444  int debug=0;
3445  if(key == 'D') {
3446  debug = input_streams[0]->st->codec->debug<<1;
3447  if(!debug) debug = 1;
3448  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3449  debug += debug;
3450  }else{
3451  char buf[32];
3452  int k = 0;
3453  i = 0;
3454  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3455  if (k > 0)
3456  buf[i++] = k;
3457  buf[i] = 0;
3458  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3459  fprintf(stderr,"error parsing debug value\n");
3460  }
3461  for(i=0;i<nb_input_streams;i++) {
3462  input_streams[i]->st->codec->debug = debug;
3463  }
3464  for(i=0;i<nb_output_streams;i++) {
3465  OutputStream *ost = output_streams[i];
3466  ost->enc_ctx->debug = debug;
3467  }
3468  if(debug) av_log_set_level(AV_LOG_DEBUG);
3469  fprintf(stderr,"debug=%d\n", debug);
3470  }
3471  if (key == '?'){
3472  fprintf(stderr, "key function\n"
3473  "? show this help\n"
3474  "+ increase verbosity\n"
3475  "- decrease verbosity\n"
3476  "c Send command to first matching filter supporting it\n"
3477  "C Send/Queue command to all matching filters\n"
3478  "D cycle through available debug modes\n"
3479  "h dump packets/hex press to cycle through the 3 states\n"
3480  "q quit\n"
3481  "s Show QP histogram\n"
3482  );
3483  }
3484  return 0;
3485 }
3486 
3487 #if HAVE_PTHREADS
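/* With more than one input file, each input gets its own reader thread that
 * feeds demuxed packets into a thread message queue; the main thread drains
 * the queues through get_input_packet_mt() below, so a blocking read on one
 * input does not stall the others. */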
3488 static void *input_thread(void *arg)
3489 {
3490  InputFile *f = arg;
3491  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3492  int ret = 0;
3493 
3494  while (1) {
3495  AVPacket pkt;
3496  ret = av_read_frame(f->ctx, &pkt);
3497 
3498  if (ret == AVERROR(EAGAIN)) {
3499  av_usleep(10000);
3500  continue;
3501  }
3502  if (ret < 0) {
3503  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3504  break;
3505  }
3506  av_dup_packet(&pkt);
3507  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3508  if (flags && ret == AVERROR(EAGAIN)) {
3509  flags = 0;
3510  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3511  av_log(f->ctx, AV_LOG_WARNING,
3512  "Thread message queue blocking; consider raising the "
3513  "thread_queue_size option (current value: %d)\n",
3514  f->thread_queue_size);
3515  }
3516  if (ret < 0) {
3517  if (ret != AVERROR_EOF)
3518  av_log(f->ctx, AV_LOG_ERROR,
3519  "Unable to send packet to main thread: %s\n",
3520  av_err2str(ret));
3521  av_free_packet(&pkt);
3522  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3523  break;
3524  }
3525  }
3526 
3527  return NULL;
3528 }
3529 
3530 static void free_input_threads(void)
3531 {
3532  int i;
3533 
3534  for (i = 0; i < nb_input_files; i++) {
3535  InputFile *f = input_files[i];
3536  AVPacket pkt;
3537 
3538  if (!f || !f->in_thread_queue)
3539  continue;
3540  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3541  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3542  av_free_packet(&pkt);
3543 
3544  pthread_join(f->thread, NULL);
3545  f->joined = 1;
3546  av_thread_message_queue_free(&f->in_thread_queue);
3547  }
3548 }
3549 
3550 static int init_input_threads(void)
3551 {
3552  int i, ret;
3553 
3554  if (nb_input_files == 1)
3555  return 0;
3556 
3557  for (i = 0; i < nb_input_files; i++) {
3558  InputFile *f = input_files[i];
3559 
3560  if (f->ctx->pb ? !f->ctx->pb->seekable :
3561  strcmp(f->ctx->iformat->name, "lavfi"))
3562  f->non_blocking = 1;
3563  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3564  f->thread_queue_size, sizeof(AVPacket));
3565  if (ret < 0)
3566  return ret;
3567 
3568  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3569  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3570  av_thread_message_queue_free(&f->in_thread_queue);
3571  return AVERROR(ret);
3572  }
3573  }
3574  return 0;
3575 }
3576 
3577 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3578 {
3579  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3580  f->non_blocking ?
3581  AV_THREAD_MESSAGE_NONBLOCK : 0);
3582 }
3583 #endif
3584 
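 /* Fetch the next packet of this input file. With -re style rate emulation
  * this returns EAGAIN until wall-clock time has caught up with the stream's
  * DTS; with multiple input files the packet comes from the reader thread. */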
3585 static int get_input_packet(InputFile *f, AVPacket *pkt)
3586 {
3587  if (f->rate_emu) {
3588  int i;
3589  for (i = 0; i < f->nb_streams; i++) {
3590  InputStream *ist = input_streams[f->ist_index + i];
3591  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3592  int64_t now = av_gettime_relative() - ist->start;
3593  if (pts > now)
3594  return AVERROR(EAGAIN);
3595  }
3596  }
3597 
3598 #if HAVE_PTHREADS
3599  if (nb_input_files > 1)
3600  return get_input_packet_mt(f, pkt);
3601 #endif
3602  return av_read_frame(f->ctx, pkt);
3603 }
3604 
3605 static int got_eagain(void)
3606 {
3607  int i;
3608  for (i = 0; i < nb_output_streams; i++)
3609  if (output_streams[i]->unavailable)
3610  return 1;
3611  return 0;
3612 }
3613 
3614 static void reset_eagain(void)
3615 {
3616  int i;
3617  for (i = 0; i < nb_input_files; i++)
3618  input_files[i]->eagain = 0;
3619  for (i = 0; i < nb_output_streams; i++)
3620  output_streams[i]->unavailable = 0;
3621 }
3622 
3623 /*
3624  * Return
3625  * - 0 -- one packet was read and processed
3626  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3627  * this function should be called again
3628  * - AVERROR_EOF -- this function should not be called again
3629  */
3630 static int process_input(int file_index)
3631 {
3632  InputFile *ifile = input_files[file_index];
3633  AVFormatContext *is;
3634  InputStream *ist;
3635  AVPacket pkt;
3636  int ret, i, j;
3637 
3638  is = ifile->ctx;
3639  ret = get_input_packet(ifile, &pkt);
3640 
3641  if (ret == AVERROR(EAGAIN)) {
3642  ifile->eagain = 1;
3643  return ret;
3644  }
3645  if (ret < 0) {
3646  if (ret != AVERROR_EOF) {
3647  print_error(is->filename, ret);
3648  if (exit_on_error)
3649  exit_program(1);
3650  }
3651 
3652  for (i = 0; i < ifile->nb_streams; i++) {
3653  ist = input_streams[ifile->ist_index + i];
3654  if (ist->decoding_needed) {
3655  ret = process_input_packet(ist, NULL);
3656  if (ret>0)
3657  return 0;
3658  }
3659 
3660  /* mark all outputs that don't go through lavfi as finished */
3661  for (j = 0; j < nb_output_streams; j++) {
3662  OutputStream *ost = output_streams[j];
3663 
3664  if (ost->source_index == ifile->ist_index + i &&
3665  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3666  finish_output_stream(ost);
3667  }
3668  }
3669 
3670  ifile->eof_reached = 1;
3671  return AVERROR(EAGAIN);
3672  }
3673 
3674  reset_eagain();
3675 
3676  if (do_pkt_dump) {
3677  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3678  is->streams[pkt.stream_index]);
3679  }
3680  /* the following test is needed in case new streams appear
3681  dynamically in the stream: we ignore them */
3682  if (pkt.stream_index >= ifile->nb_streams) {
3683  report_new_stream(file_index, &pkt);
3684  goto discard_packet;
3685  }
3686 
3687  ist = input_streams[ifile->ist_index + pkt.stream_index];
3688 
3689  ist->data_size += pkt.size;
3690  ist->nb_packets++;
3691 
3692  if (ist->discard)
3693  goto discard_packet;
3694 
3695  if (debug_ts) {
3696  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3697  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3698  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3699  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3700  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3701  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3702  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3703  av_ts2str(input_files[ist->file_index]->ts_offset),
3704  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3705  }
3706 
3707  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3708  int64_t stime, stime2;
3709  // Correcting the start time based on the enabled streams.
3710  // FIXME: ideally this should be done before the first use of start_time, but we do not know which streams are enabled at that point,
3711  // so we do it here instead, as part of discontinuity handling.
3712  if ( ist->next_dts == AV_NOPTS_VALUE
3713  && ifile->ts_offset == -is->start_time
3714  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3715  int64_t new_start_time = INT64_MAX;
3716  for (i=0; i<is->nb_streams; i++) {
3717  AVStream *st = is->streams[i];
3718  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3719  continue;
3720  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3721  }
3722  if (new_start_time > is->start_time) {
3723  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3724  ifile->ts_offset = -new_start_time;
3725  }
3726  }
3727 
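 /* Timestamps more than half the wrap range past the start time are assumed
  * to have wrapped already and are shifted down by 2^pts_wrap_bits. */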
3728  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3729  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3730  ist->wrap_correction_done = 1;
3731 
3732  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3733  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3734  ist->wrap_correction_done = 0;
3735  }
3736  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3737  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3738  ist->wrap_correction_done = 0;
3739  }
3740  }
3741 
3742  /* add the stream-global side data to the first packet */
3743  if (ist->nb_packets == 1) {
3744  if (ist->st->nb_side_data)
3745  av_packet_split_side_data(&pkt);
3746  for (i = 0; i < ist->st->nb_side_data; i++) {
3747  AVPacketSideData *src_sd = &ist->st->side_data[i];
3748  uint8_t *dst_data;
3749 
3750  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3751  continue;
3752  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3753  continue;
3754 
3755  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3756  if (!dst_data)
3757  exit_program(1);
3758 
3759  memcpy(dst_data, src_sd->data, src_sd->size);
3760  }
3761  }
3762 
3763  if (pkt.dts != AV_NOPTS_VALUE)
3764  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3765  if (pkt.pts != AV_NOPTS_VALUE)
3766  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3767 
3768  if (pkt.pts != AV_NOPTS_VALUE)
3769  pkt.pts *= ist->ts_scale;
3770  if (pkt.dts != AV_NOPTS_VALUE)
3771  pkt.dts *= ist->ts_scale;
3772 
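 /* Inter-stream discontinuity handling: for formats flagged AVFMT_TS_DISCONT,
  * a DTS jump of more than dts_delta_threshold seconds relative to the file's
  * last seen timestamp is absorbed into ts_offset. */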
3773  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3774  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3775  pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3776  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3777  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3778  int64_t delta = pkt_dts - ifile->last_ts;
3779  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3780  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3781  ifile->ts_offset -= delta;
3783  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3784  delta, ifile->ts_offset);
3785  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3786  if (pkt.pts != AV_NOPTS_VALUE)
3787  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3788  }
3789  }
3790 
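 /* Per-stream discontinuity handling: with AVFMT_TS_DISCONT the offset is
  * corrected as above; otherwise timestamps further than dts_error_threshold
  * seconds from the predicted next_dts are declared invalid and dropped. */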
3791  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3792  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3793  pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3794  !copy_ts) {
3795  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3796  int64_t delta = pkt_dts - ist->next_dts;
3797  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3798  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3799  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3800  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3801  ifile->ts_offset -= delta;
3803  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3804  delta, ifile->ts_offset);
3805  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3806  if (pkt.pts != AV_NOPTS_VALUE)
3807  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3808  }
3809  } else {
3810  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3811  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3812  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3813  pkt.dts = AV_NOPTS_VALUE;
3814  }
3815  if (pkt.pts != AV_NOPTS_VALUE){
3816  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3817  delta = pkt_pts - ist->next_dts;
3818  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3819  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3820  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3821  pkt.pts = AV_NOPTS_VALUE;
3822  }
3823  }
3824  }
3825  }
3826 
3827  if (pkt.dts != AV_NOPTS_VALUE)
3828  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3829 
3830  if (debug_ts) {
3831  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3832  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3833  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3834  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3835  av_ts2str(input_files[ist->file_index]->ts_offset),
3836  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3837  }
3838 
3839  sub2video_heartbeat(ist, pkt.pts);
3840 
3841  process_input_packet(ist, &pkt);
3842 
3843 discard_packet:
3844  av_free_packet(&pkt);
3845 
3846  return 0;
3847 }
3848 
3849 /**
3850  * Perform a step of transcoding for the specified filter graph.
3851  *
3852  * @param[in] graph filter graph to consider
3853  * @param[out] best_ist input stream where another frame would allow transcoding to continue
3854  * @return 0 for success, <0 for error
3855  */
3856 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3857 {
3858  int i, ret;
3859  int nb_requests, nb_requests_max = 0;
3860  InputFilter *ifilter;
3861  InputStream *ist;
3862 
3863  *best_ist = NULL;
3864  ret = avfilter_graph_request_oldest(graph->graph);
3865  if (ret >= 0)
3866  return reap_filters(0);
3867 
3868  if (ret == AVERROR_EOF) {
3869  ret = reap_filters(1);
3870  for (i = 0; i < graph->nb_outputs; i++)
3871  close_output_stream(graph->outputs[i]->ost);
3872  return ret;
3873  }
3874  if (ret != AVERROR(EAGAIN))
3875  return ret;
3876 
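 /* The graph needs more input (EAGAIN): pick the input stream whose buffer
  * source has accumulated the most failed requests. */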
3877  for (i = 0; i < graph->nb_inputs; i++) {
3878  ifilter = graph->inputs[i];
3879  ist = ifilter->ist;
3880  if (input_files[ist->file_index]->eagain ||
3881  input_files[ist->file_index]->eof_reached)
3882  continue;
3883  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3884  if (nb_requests > nb_requests_max) {
3885  nb_requests_max = nb_requests;
3886  *best_ist = ist;
3887  }
3888  }
3889 
3890  if (!*best_ist)
3891  for (i = 0; i < graph->nb_outputs; i++)
3892  graph->outputs[i]->ost->unavailable = 1;
3893 
3894  return 0;
3895 }
3896 
3897 /**
3898  * Run a single step of transcoding.
3899  *
3900  * @return 0 for success, <0 for error
3901  */
3902 static int transcode_step(void)
3903 {
3904  OutputStream *ost;
3905  InputStream *ist;
3906  int ret;
3907 
3908  ost = choose_output();
3909  if (!ost) {
3910  if (got_eagain()) {
3911  reset_eagain();
3912  av_usleep(10000);
3913  return 0;
3914  }
3915  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3916  return AVERROR_EOF;
3917  }
3918 
3919  if (ost->filter) {
3920  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3921  return ret;
3922  if (!ist)
3923  return 0;
3924  } else {
3925  av_assert0(ost->source_index >= 0);
3926  ist = input_streams[ost->source_index];
3927  }
3928 
3929  ret = process_input(ist->file_index);
3930  if (ret == AVERROR(EAGAIN)) {
3931  if (input_files[ist->file_index]->eagain)
3932  ost->unavailable = 1;
3933  return 0;
3934  }
3935 
3936  if (ret < 0)
3937  return ret == AVERROR_EOF ? 0 : ret;
3938 
3939  return reap_filters(0);
3940 }
3941 
3942 /*
3943  * The following code is the main loop of the file converter
3944  */
3945 static int transcode(void)
3946 {
3947  int ret, i;
3948  AVFormatContext *os;
3949  OutputStream *ost;
3950  InputStream *ist;
3951  int64_t timer_start;
3952 
3953  ret = transcode_init();
3954  if (ret < 0)
3955  goto fail;
3956 
3957  if (stdin_interaction) {
3958  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3959  }
3960 
3961  timer_start = av_gettime_relative();
3962 
3963 #if HAVE_PTHREADS
3964  if ((ret = init_input_threads()) < 0)
3965  goto fail;
3966 #endif
3967 
3968  while (!received_sigterm) {
3969  int64_t cur_time= av_gettime_relative();
3970 
3971  /* if 'q' was pressed, exit */
3972  if (stdin_interaction)
3973  if (check_keyboard_interaction(cur_time) < 0)
3974  break;
3975 
3976  /* check if there's any stream where output is still needed */
3977  if (!need_output()) {
3978  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3979  break;
3980  }
3981 
3982  ret = transcode_step();
3983  if (ret < 0) {
3984  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
3985  continue;
3986  } else {
3987  char errbuf[128];
3988  av_strerror(ret, errbuf, sizeof(errbuf));
3989 
3990  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
3991  break;
3992  }
3993  }
3994 
3995  /* dump report by using the first output video and audio streams */
3996  print_report(0, timer_start, cur_time);
3997  }
3998 #if HAVE_PTHREADS
3999  free_input_threads();
4000 #endif
4001 
4002  /* at the end of stream, we must flush the decoder buffers */
4003  for (i = 0; i < nb_input_streams; i++) {
4004  ist = input_streams[i];
4005  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4006  process_input_packet(ist, NULL);
4007  }
4008  }
4009  flush_encoders();
4010 
4011  term_exit();
4012 
4013  /* write the trailer if needed and close file */
4014  for (i = 0; i < nb_output_files; i++) {
4015  os = output_files[i]->ctx;
4016  av_write_trailer(os);
4017  }
4018 
4019  /* dump report by using the first video and audio streams */
4020  print_report(1, timer_start, av_gettime_relative());
4021 
4022  /* close each encoder */
4023  for (i = 0; i < nb_output_streams; i++) {
4024  ost = output_streams[i];
4025  if (ost->encoding_needed) {
4026  av_freep(&ost->enc_ctx->stats_in);
4027  }
4028  }
4029 
4030  /* close each decoder */
4031  for (i = 0; i < nb_input_streams; i++) {
4032  ist = input_streams[i];
4033  if (ist->decoding_needed) {
4034  avcodec_close(ist->dec_ctx);
4035  if (ist->hwaccel_uninit)
4036  ist->hwaccel_uninit(ist->dec_ctx);
4037  }
4038  }
4039 
4040  /* finished ! */
4041  ret = 0;
4042 
4043  fail:
4044 #if HAVE_PTHREADS
4045  free_input_threads();
4046 #endif
4047 
4048  if (output_streams) {
4049  for (i = 0; i < nb_output_streams; i++) {
4050  ost = output_streams[i];
4051  if (ost) {
4052  if (ost->logfile) {
4053  fclose(ost->logfile);
4054  ost->logfile = NULL;
4055  }
4056  av_freep(&ost->forced_kf_pts);
4057  av_freep(&ost->apad);
4058  av_freep(&ost->disposition);
4059  av_dict_free(&ost->encoder_opts);
4060  av_dict_free(&ost->sws_dict);
4061  av_dict_free(&ost->swr_opts);
4062  av_dict_free(&ost->resample_opts);
4063  av_dict_free(&ost->bsf_args);
4064  }
4065  }
4066  }
4067  return ret;
4068 }
4069 
4070 
4071 static int64_t getutime(void)
4072 {
4073 #if HAVE_GETRUSAGE
4074  struct rusage rusage;
4075 
4076  getrusage(RUSAGE_SELF, &rusage);
4077  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4078 #elif HAVE_GETPROCESSTIMES
4079  HANDLE proc;
4080  FILETIME c, e, k, u;
4081  proc = GetCurrentProcess();
4082  GetProcessTimes(proc, &c, &e, &k, &u);
4083  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4084 #else
4085  return av_gettime_relative();
4086 #endif
4087 }
4088 
4089 static int64_t getmaxrss(void)
4090 {
4091 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4092  struct rusage rusage;
4093  getrusage(RUSAGE_SELF, &rusage);
4094  return (int64_t)rusage.ru_maxrss * 1024;
4095 #elif HAVE_GETPROCESSMEMORYINFO
4096  HANDLE proc;
4097  PROCESS_MEMORY_COUNTERS memcounters;
4098  proc = GetCurrentProcess();
4099  memcounters.cb = sizeof(memcounters);
4100  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4101  return memcounters.PeakPagefileUsage;
4102 #else
4103  return 0;
4104 #endif
4105 }
4106 
4107 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4108 {
4109 }
4110 
4111 int main(int argc, char **argv)
4112 {
4113  int ret;
4114  int64_t ti;
4115 
4116  register_exit(ffmpeg_cleanup);
4117 
4118  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4119 
4120  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4121  parse_loglevel(argc, argv, options);
4122 
4123  if(argc>1 && !strcmp(argv[1], "-d")){
4124  run_as_daemon=1;
4125  av_log_set_callback(log_callback_null);
4126  argc--;
4127  argv++;
4128  }
4129 
4130  avcodec_register_all();
4131 #if CONFIG_AVDEVICE
4132  avdevice_register_all();
4133 #endif
4134  avfilter_register_all();
4135  av_register_all();
4136  avformat_network_init();
4137 
4138  show_banner(argc, argv, options);
4139 
4140  term_init();
4141 
4142  /* parse options and open all input/output files */
4143  ret = ffmpeg_parse_options(argc, argv);
4144  if (ret < 0)
4145  exit_program(1);
4146 
4147  if (nb_output_files <= 0 && nb_input_files == 0) {
4148  show_usage();
4149  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4150  exit_program(1);
4151  }
4152 
4153  /* file converter / grab */
4154  if (nb_output_files <= 0) {
4155  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4156  exit_program(1);
4157  }
4158 
4159 // if (nb_input_files == 0) {
4160 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4161 // exit_program(1);
4162 // }
4163 
4164  current_time = ti = getutime();
4165  if (transcode() < 0)
4166  exit_program(1);
4167  ti = getutime() - ti;
4168  if (do_benchmark) {
4169  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4170  }
4171  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4172  decode_error_stat[0], decode_error_stat[1]);
4173  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4174  exit_program(69);
4175 
4176  exit_program(received_nb_signals ? 255 : main_return_code);
4177  return main_return_code;
4178 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1474
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:271
#define extra_bits(eb)
Definition: intrax8.c:152
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:797
int got_output
Definition: ffmpeg.h:296
#define AV_DISPOSITION_METADATA
Definition: avformat.h:826
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2854
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1802
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1018
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1914
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:388
const struct AVCodec * codec
Definition: avcodec.h:1511
Definition: ffmpeg.h:367
AVRational framerate
Definition: avcodec.h:3302
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:322
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:111
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:476
void term_init(void)
Definition: ffmpeg.c:369
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:282
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:230
int nb_outputs
Definition: ffmpeg.h:247
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3746
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
AVDictionary * swr_opts
Definition: ffmpeg.h:437
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:257
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2129
int resample_channels
Definition: ffmpeg.h:291
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:311
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2997
int stream_copy
Definition: ffmpeg.h:443
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:932
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3774
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1523
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2864
AVOption.
Definition: opt.h:255
AVRational frame_rate
Definition: ffmpeg.h:408
int64_t * forced_kf_pts
Definition: ffmpeg.h:416
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:291
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2941
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:432
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:776
#define CODEC_FLAG_PASS2
Definition: avcodec.h:978
static int process_input(int file_index)
Definition: ffmpeg.c:3630
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:447
int exit_on_error
Definition: ffmpeg_opt.c:105
const char * fmt
Definition: avisynth_c.h:632
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2585
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1448
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
static int run_as_daemon
Definition: ffmpeg.c:130
Memory buffer source API.
void av_log_set_level(int level)
Set the log level.
Definition: log.c:382
AVRational framerate
Definition: ffmpeg.h:280
AVCodecParserContext * parser
Definition: ffmpeg.h:451
static int64_t cur_time
Definition: ffserver.c:255
int decoding_needed
Definition: ffmpeg.h:255
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:914
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1529
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:411
int index
stream index in AVFormatContext
Definition: avformat.h:843
int size
Definition: avcodec.h:1424
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4089
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:131
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2572
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:486
#define AV_DISPOSITION_DUB
Definition: avformat.h:798
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1902
int eagain
Definition: ffmpeg.h:343
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1122
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1722
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:398
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:605
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:810
int quality
Definition: ffmpeg.h:463
unsigned num_rects
Definition: avcodec.h:3803
AVFrame * filter_frame
Definition: ffmpeg.h:262
static int transcode_init(void)
Definition: ffmpeg.c:2774
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2579
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2779
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:98
enum AVMediaType type
Definition: avcodec.h:3485
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:812
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:653
discard all
Definition: avcodec.h:689
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:954
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:325
int64_t input_ts_offset
Definition: ffmpeg.h:345
int do_hex_dump
Definition: ffmpeg_opt.c:99
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1883
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3003
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2865
int nb_input_streams
Definition: ffmpeg.c:141
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:70
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2789
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2718
Picture data structure.
Definition: avcodec.h:3744
uint64_t packets_written
Definition: ffmpeg.h:457
AVCodec.
Definition: avcodec.h:3472
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:182
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2299
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:483
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Setup the picture fields based on the specified image parameters and the provided image data buffer...
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:106
float dts_error_threshold
Definition: ffmpeg_opt.c:90
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:477
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
uint64_t data_size
Definition: ffmpeg.h:455
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:459
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:2277
#define log2(x)
Definition: libm.h:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:802
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1631
Undefined.
Definition: avutil.h:265
AVSubtitleRect ** rects
Definition: avcodec.h:3804
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2337
int encoding_needed
Definition: ffmpeg.h:387
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:610
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4107
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3380
Format I/O context.
Definition: avformat.h:1273
uint64_t samples_decoded
Definition: ffmpeg.h:337
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:221
unsigned int nb_stream_indexes
Definition: avformat.h:1211
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:72
int64_t cur_dts
Definition: avformat.h:1019
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3776
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:882
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
uint64_t frames_decoded
Definition: ffmpeg.h:336
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:241
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:953
char * logfile_prefix
Definition: ffmpeg.h:427
static uint8_t * subtitle_out
Definition: ffmpeg.c:138
static int main_return_code
Definition: ffmpeg.c:321
static int64_t start_time
Definition: ffplay.c:325
int copy_initial_nonkeyframes
Definition: ffmpeg.h:445
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:111
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:532
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
AVDictionary * sws_dict
Definition: ffmpeg.h:436
Opaque data information usually continuous.
Definition: avutil.h:195
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:186
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:215
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3237
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:642
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
attribute_deprecated void(* destruct)(struct AVPacket *)
Definition: avcodec.h:1444
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3745
int stdin_interaction
Definition: ffmpeg_opt.c:108
FILE * logfile
Definition: ffmpeg.h:428
AVDictionary * opts
Definition: ffmpeg.h:474
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:565
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1279
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
libavcodec/libavfilter gluing utilities
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2468
static int need_output(void)
Definition: ffmpeg.c:3332
int last_droped
Definition: ffmpeg.h:404
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:365
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:958
static double psnr(double d)
Definition: ffmpeg.c:1255
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1617
int do_benchmark
Definition: ffmpeg_opt.c:97
Keep a reference to the frame.
Definition: buffersrc.h:62
int audio_sync_method
Definition: ffmpeg_opt.c:93
int shortest
Definition: ffmpeg.h:480
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1341
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2160
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4071
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:94
const char * name
Definition: avcodec.h:5374
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:351
uint8_t * data
Definition: avcodec.h:1423
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:438
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2655
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:82
AVFilterContext * filter
Definition: ffmpeg.h:227
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4200
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:143
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:408
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1317
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:320
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1260
int resample_sample_rate
Definition: ffmpeg.h:290
uint8_t * data
Definition: avcodec.h:1373
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:367
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:487
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:289
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3777
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:390
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2996
AVCodec * dec
Definition: ffmpeg.h:260
static int64_t duration
Definition: ffplay.c:326
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1209
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2771
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:178
int top_field_first
Definition: ffmpeg.h:281
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1401
int nb_output_streams
Definition: ffmpeg.c:146
int file_index
Definition: ffmpeg.h:251
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1441
const OptionDef options[]
Definition: ffserver.c:3807
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5369
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2244
unsigned int * stream_index
Definition: avformat.h:1210
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:287
int resample_height
Definition: ffmpeg.h:285
int wrap_correction_done
Definition: ffmpeg.h:272
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:274
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:262
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:824
unsigned m
Definition: audioconvert.c:187
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:117
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1292
int64_t next_dts
Definition: ffmpeg.h:267
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1469
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:480
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2902
static volatile int transcode_init_done
Definition: ffmpeg.c:319
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3486
#define isatty(fd)
Definition: checkasm.c:52
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3533
int rate_emu
Definition: ffmpeg.h:354
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1812
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1298
static void reset_eagain(void)
Definition: ffmpeg.c:3614
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:636
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2408
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:327
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1546
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:594
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2923
FilterGraph ** filtergraphs
Definition: ffmpeg.c:150
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:464
AVFilterContext * filter
Definition: ffmpeg.h:220
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:324
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:474
int64_t start
Definition: ffmpeg.h:264
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3775
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:334
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:397
int video_sync_method
Definition: ffmpeg_opt.c:94
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:133
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:175
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
char * sdp_filename
Definition: ffmpeg_opt.c:86
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:405
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2202
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:117
int capabilities
Codec capabilities.
Definition: avcodec.h:3491
int initial_padding
Audio only.
Definition: avcodec.h:3294
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:125
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1424
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:425
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1406
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1597
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
AVChapter ** chapters
Definition: avformat.h:1475
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2604
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:123
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1375
int av_log_get_level(void)
Get the current log level.
Definition: log.c:377
const char * name
Name of the codec implementation.
Definition: avcodec.h:3479
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:807
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:71
int side_data_elems
Definition: avcodec.h:1435
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:587
int force_fps
Definition: ffmpeg.h:409
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:925
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1206
#define FFMAX(a, b)
Definition: common.h:79
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
int qp_hist
Definition: ffmpeg_opt.c:107
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:57
float frame_drop_threshold
Definition: ffmpeg_opt.c:95
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:936
int64_t error[4]
Definition: ffmpeg.h:469
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1429
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2935
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2323
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
uint32_t end_display_time
Definition: avcodec.h:3802
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3805
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:861
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2581
OutputFilter * filter
Definition: ffmpeg.h:430
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:413
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:801
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1481
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:809
common internal API header
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1329
static int nb_frames_drop
Definition: ffmpeg.c:132
A bitmap, pict will be set.
Definition: avcodec.h:3756
int nb_output_files
Definition: ffmpeg.c:148
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:160
int bit_rate
the average bitrate
Definition: avcodec.h:1567
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:198
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
static int transcode(void)
Definition: ffmpeg.c:3945
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:788
char filename[1024]
input or output filename
Definition: avformat.h:1349
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3784
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:127
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:246
#define FFMIN(a, b)
Definition: common.h:81
float y
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int saw_first_ts
Definition: ffmpeg.h:277
This side data contains quality related information from the encoder.
Definition: avcodec.h:1303
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1934
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:69
struct OutputStream * ost
Definition: ffmpeg.h:228
int width
picture width / height.
Definition: avcodec.h:1681
PVOID HANDLE
char * apad
Definition: ffmpeg.h:440
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:421
const char * name
Definition: avformat.h:513
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:221
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:767
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:751
int nb_filtergraphs
Definition: ffmpeg.c:151
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:80
int64_t last_ts
Definition: ffmpeg.h:347
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:68
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:283
int do_pkt_dump
Definition: ffmpeg_opt.c:100
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2558
int64_t max_frames
Definition: ffmpeg.h:401
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:326
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:303
float u
int audio_channels_mapped
Definition: ffmpeg.h:425
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:916
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1640
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:967
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:192
Opaque data information usually sparse.
Definition: avutil.h:197
#define FF_API_DESTRUCT_PACKET
Definition: version.h:83
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:112
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3605
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:228
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:3035
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:116
#define av_log2
Definition: intmath.h:100
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:404
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:811
int ret
Definition: ffmpeg.h:297
int audio_volume
Definition: ffmpeg_opt.c:92
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:842
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:472
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
InputFilter ** filters
Definition: ffmpeg.h:312
int fix_sub_duration
Definition: ffmpeg.h:294
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:350
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4213
Definition: ffmpeg.h:69
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2282
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:64
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:797
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:169
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2733
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Filter bitstream.
int frame_size
Definition: mxfenc.c:1805
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:50
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:825
int ost_index
Definition: ffmpeg.h:475
AVS_Value src
Definition: avisynth_c.h:482
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:713
enum AVMediaType codec_type
Definition: avcodec.h:1510
double ts_scale
Definition: ffmpeg.h:276
int unavailable
Definition: ffmpeg.h:442
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3492
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:164
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2525
enum AVCodecID codec_id
Definition: avcodec.h:1519
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:313
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1476
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:184
float max_error_rate
Definition: ffmpeg_opt.c:110
int sample_rate
samples per second
Definition: avcodec.h:2262
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
uint64_t frames_encoded
Definition: ffmpeg.h:459
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2062
AVIOContext * pb
I/O context.
Definition: avformat.h:1315
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:166
int ist_index
Definition: ffmpeg.h:344
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:538
uint8_t flags
Definition: pixdesc.h:90
int debug
debug
Definition: avcodec.h:2842
static void print_sdp(void)
Definition: ffmpeg.c:2433
const char * graph_desc
Definition: ffmpeg.h:239
int guess_layout_max
Definition: ffmpeg.h:282
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int64_t start_time
Definition: ffmpeg.h:348
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:468
main external API structure.
Definition: avcodec.h:1502
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:357
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:466
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:765
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2883
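As a hedged sketch of how avsubtitle_free() pairs with subtitle decoding (the decoder context and packet are assumed to come from an already-opened input elsewhere):

#include "libavcodec/avcodec.h"

/* Decode one subtitle packet with an already-opened subtitle decoder and
 * release the result. Real code would render or re-encode 'sub' first. */
static int decode_one_subtitle(AVCodecContext *dec_ctx, AVPacket *pkt)
{
    AVSubtitle sub;
    int got_sub = 0;
    int ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_sub, pkt);
    if (ret >= 0 && got_sub)
        avsubtitle_free(&sub);   /* frees the rects, not the AVSubtitle struct itself */
    return ret;
}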
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
const char * attachment_filename
Definition: ffmpeg.h:444
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1534
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1785
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:596
AVCodecContext * enc_ctx
Definition: ffmpeg.h:399
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:261
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1618
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
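A minimal sketch of typical AVDictionary usage; the option names shown are only illustrative:

#include <stdio.h>
#include "libavutil/dict.h"

/* Build a small options dictionary, walk its entries, then free it. */
static void dict_example(void)
{
    AVDictionary *opts = NULL;
    AVDictionaryEntry *e = NULL;

    av_dict_set(&opts, "threads", "auto", 0);
    av_dict_set(&opts, "refcounted_frames", "1", 0);

    /* An empty key plus AV_DICT_IGNORE_SUFFIX iterates over all entries. */
    while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);

    av_dict_free(&opts);
}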
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
Definition: avcodec.h:5367
AVCodecContext * dec_ctx
Definition: ffmpeg.h:259
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3856
AVStream * st
Definition: ffmpeg.h:252
int * audio_channels_map
Definition: ffmpeg.h:424
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixel format
Definition: avcodec.h:1772
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:145
int index
Definition: gxfenc.c:89
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:907
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:383
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:48
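A minimal, single-threaded sketch of the message-queue API (ffmpeg.c uses it to hand packets from input threads to the main loop); the queue size and element type here are assumptions for illustration, and error handling is trimmed:

#include "libavutil/threadmessage.h"
#include "libavcodec/avcodec.h"

static int queue_example(void)
{
    AVThreadMessageQueue *mq = NULL;
    AVPacket in, out;
    int ret;

    ret = av_thread_message_queue_alloc(&mq, 8, sizeof(AVPacket));
    if (ret < 0)
        return ret;

    av_init_packet(&in);
    av_thread_message_queue_send(mq, &in, 0);   /* blocking send */
    av_thread_message_queue_recv(mq, &out, 0);  /* blocking receive */

    av_thread_message_queue_set_err_send(mq, AVERROR_EOF); /* make further sends fail with EOF */
    av_thread_message_queue_free(&mq);
    return 0;
}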
static int current_time
Definition: ffmpeg.c:135
int64_t sync_opts
Definition: ffmpeg.h:392
char * vstats_filename
Definition: ffmpeg_opt.c:85
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:145
char * disposition
Definition: ffmpeg.h:447
#define mid_pred
Definition: mathops.h:95
AVMediaType
Definition: avutil.h:191
discard useless packets like 0 size packets in avi
Definition: avcodec.h:684
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize an AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:918
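A small sketch of avio_open2() for plain file output; the file name is only an example:

#include <stdint.h>
#include "libavformat/avio.h"

/* Open "test.bin" for writing through the avio layer, write a few bytes,
 * and close it again. */
static int avio_example(void)
{
    AVIOContext *pb = NULL;
    static const uint8_t data[] = { 0xDE, 0xAD, 0xBE, 0xEF };
    int ret = avio_open2(&pb, "test.bin", AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0)
        return ret;
    avio_write(pb, data, sizeof(data));
    return avio_closep(&pb);   /* flushes, closes and NULLs the pointer */
}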
int nb_streams_warn
Definition: ffmpeg.h:353
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2523
AVDictionary * decoder_opts
Definition: ffmpeg.h:279
int autorotate
Definition: ffmpeg.h:284
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:574
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1349
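A minimal sketch of opening and closing a decoder with avcodec_open2(); the codec ID and option are examples, and avcodec_register_all() is needed on FFmpeg versions of this vintage:

#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

static int open_decoder_example(void)
{
    const AVCodec *dec;
    AVCodecContext *ctx;
    AVDictionary *opts = NULL;
    int ret;

    avcodec_register_all();
    dec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!dec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(dec);
    if (!ctx)
        return AVERROR(ENOMEM);

    av_dict_set(&opts, "refcounted_frames", "1", 0);
    ret = avcodec_open2(ctx, dec, &opts);   /* consumed options are removed from opts */
    av_dict_free(&opts);

    avcodec_free_context(&ctx);             /* also closes the codec if it was opened */
    return ret;
}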
int showed_multi_packet_warning
Definition: ffmpeg.h:278
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:109
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3686
int64_t ts_offset
Definition: ffmpeg.h:346
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:328
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:869
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3902
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:433
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3410
misc parsing utilities
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1478
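The canonical av_read_frame() demuxing loop, sketched with an example input path:

#include "libavformat/avformat.h"

/* Count the packets in an input file. "input.mkv" is only an example. */
static int count_packets(void)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;
    int n = 0, ret;

    av_register_all();
    if ((ret = avformat_open_input(&ic, "input.mkv", NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(ic, NULL)) < 0)
        goto end;

    while (av_read_frame(ic, &pkt) >= 0) {
        n++;
        av_packet_unref(&pkt);   /* the caller owns the returned packet */
    }
end:
    avformat_close_input(&ic);
    return ret < 0 ? ret : n;
}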
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:209
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:358
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:566
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
AVFrame * filtered_frame
Definition: ffmpeg.h:402
int source_index
Definition: ffmpeg.h:385
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
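A short sketch of allocating frame buffers with av_frame_get_buffer(); the resolution and pixel format are arbitrary examples:

#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"

/* Allocate a refcounted 1280x720 YUV420P frame, or return NULL on failure. */
static AVFrame *alloc_video_frame(void)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->width  = 1280;
    frame->height = 720;
    if (av_frame_get_buffer(frame, 32) < 0) {   /* 32-byte alignment */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}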
static volatile int received_nb_signals
Definition: ffmpeg.c:318
int copy_prior_start
Definition: ffmpeg.h:446
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:464
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1583
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:627
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
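A small sketch of timing a section of code with av_gettime_relative(); av_usleep() stands in for real work:

#include <stdio.h>
#include <inttypes.h>
#include "libavutil/time.h"

static void time_example(void)
{
    int64_t t0 = av_gettime_relative();
    av_usleep(1000);   /* stand-in for real work */
    printf("elapsed: %"PRId64" us\n", av_gettime_relative() - t0);
}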
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:84
int nb_filters
Definition: ffmpeg.h:313
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2477
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1358
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:420
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:268
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:68
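A minimal helper built on av_strerror(); ffmpeg.c itself mostly uses the av_err2str() convenience macro that wraps it:

#include <stdio.h>
#include "libavutil/error.h"

/* Print a readable description of an AVERROR code to stderr. */
static void report_error(int err)
{
    char buf[AV_ERROR_MAX_STRING_SIZE];
    if (av_strerror(err, buf, sizeof(buf)) < 0)
        snprintf(buf, sizeof(buf), "unknown error %d", err);
    fprintf(stderr, "error: %s\n", buf);
}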
int resample_sample_fmt
Definition: ffmpeg.h:289
int forced_kf_count
Definition: ffmpeg.h:417
int64_t start
Definition: avformat.h:1239
OSTFinished finished
Definition: ffmpeg.h:441
char * forced_keyframes
Definition: ffmpeg.h:419
uint64_t data_size
Definition: ffmpeg.h:332
int resample_width
Definition: ffmpeg.h:286
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:270
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1024
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1434
struct FilterGraph * graph
Definition: ffmpeg.h:229
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t limit_filesize
Definition: ffmpeg.h:478
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1412
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:79
AVIOContext * progress_avio
Definition: ffmpeg.c:136
if(ret< 0)
Definition: vf_mcdeint.c:280
int main(int argc, char **argv)
Definition: ffmpeg.c:4111
int reinit_filters
Definition: ffmpeg.h:315
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:465
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:261
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:928
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:894
static double c[64]
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:905
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3801
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1008
uint64_t samples_encoded
Definition: ffmpeg.h:460
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1238
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:208
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3062
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:903
char * key
Definition: dict.h:87
uint32_t BOOL
static FILE * vstats_file
Definition: ffmpeg.c:115
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:79
AVFrame * last_frame
Definition: ffmpeg.h:403
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int copy_ts
Definition: ffmpeg_opt.c:101
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1285
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3721
AVFormatContext * ctx
Definition: ffmpeg.h:341
int pict_type
Definition: ffmpeg.h:466
AVCodec * enc
Definition: ffmpeg.h:400
AVSubtitle subtitle
Definition: ffmpeg.h:298
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:636
int eof_reached
Definition: ffmpeg.h:342
int forced_kf_index
Definition: ffmpeg.h:418
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:820
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:475
char * avfilter
Definition: ffmpeg.h:431
uint8_t * name
Definition: ffmpeg.h:223
char * value
Definition: dict.h:88
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:80
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:372
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:89
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:708
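A sketch of the av_expr_parse()/av_expr_eval()/av_expr_free() lifecycle, similar in spirit to how ffmpeg.c evaluates -force_key_frames expressions; the expression and constant name are examples:

#include <stdio.h>
#include "libavutil/eval.h"

static int expr_example(void)
{
    static const char *const names[] = { "t", NULL };
    AVExpr *e = NULL;
    double vals[1];
    int ret = av_expr_parse(&e, "gte(t,5)", names,
                            NULL, NULL, NULL, NULL, 0, NULL);
    if (ret < 0)
        return ret;
    vals[0] = 7.0;   /* value of the constant "t" */
    printf("gte(t,5) with t=7 -> %f\n", av_expr_eval(e, vals, NULL));
    av_expr_free(e);
    return 0;
}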
int channels
number of audio channels
Definition: avcodec.h:2263
int top_field_first
Definition: ffmpeg.h:410
OutputFilter ** outputs
Definition: ffmpeg.h:246
InputFile ** input_files
Definition: ffmpeg.c:142
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2515
void av_log_set_flags(int arg)
Definition: log.c:387
Immediately push the frame to the output.
Definition: buffersrc.h:55
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:237
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:219
AVDictionary * bsf_args
Definition: ffmpeg.h:439
AVFormatContext * ctx
Definition: ffmpeg.h:473
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:800
void show_usage(void)
Definition: ffmpeg_opt.c:2873
uint8_t pi (declaration truncated: CONV_FUNC_GROUP sample-format conversion macros and the ff_audio_convert_alloc()/ff_audio_convert() helpers from audio_convert.c)
An instance of a filter.
Definition: avfilter.h:633
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:319
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1422
AVDictionary * encoder_opts
Definition: ffmpeg.h:435
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:986
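A hedged sketch of the usual end-of-muxing sequence around av_write_trailer(); 'oc' is assumed to be a muxer context whose header has already been written:

#include "libavformat/avformat.h"

/* Write the trailer, close the output AVIOContext (unless the muxer is
 * AVFMT_NOFILE) and free the format context. */
static int finish_muxing(AVFormatContext *oc)
{
    int ret = av_write_trailer(oc);
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);
    avformat_free_context(oc);
    return ret;
}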
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:113
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4557
int height
Definition: frame.h:220
InputFilter ** inputs
Definition: ffmpeg.h:244
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2259
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:328
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:628
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
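A short sketch of reading packet side data; AV_PKT_DATA_DISPLAYMATRIX is just one example type, and which types are present depends entirely on the demuxer and stream:

#include <stdio.h>
#include "libavcodec/avcodec.h"

static void check_side_data(AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_DISPLAYMATRIX, &size);
    if (sd)
        printf("display matrix side data, %d bytes\n", size);
}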
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2265
OutputFile ** output_files
Definition: ffmpeg.c:147
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1705
int copy_tb
Definition: ffmpeg_opt.c:103
static volatile int received_sigterm
Definition: ffmpeg.c:317
#define FFSWAP(type, a, b)
Definition: common.h:84
int discard
Definition: ffmpeg.h:253
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3585
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2050
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:299
int stream_index
Definition: avcodec.h:1425
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:884
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:318
enum AVSubtitleType type
Definition: avcodec.h:3785
int64_t first_pts
Definition: ffmpeg.h:395
int nb_inputs
Definition: ffmpeg.h:245
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:907
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:223
#define DECODING_FOR_OST
Definition: ffmpeg.h:256
int index
Definition: ffmpeg.h:384
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1061
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
uint64_t resample_channel_layout
Definition: ffmpeg.h:292
OSTFinished
Definition: ffmpeg.h:377
This structure stores compressed data.
Definition: avcodec.h:1400
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:959
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2244
int delay
Codec delay.
Definition: avcodec.h:1664
int debug_ts
Definition: ffmpeg_opt.c:104
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3362
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:252
static void sigterm_handler(int sig)
Definition: ffmpeg.c:324
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1416
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:117
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1559
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:80
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1425
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:799
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
InputStream ** input_streams
Definition: ffmpeg.c:140
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
Definition: ffmpeg.h:371
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:796
struct InputStream::@25 prev_sub
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3236