FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity strings consumed by the shared cmdutils helpers
   (version banner, help output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Video statistics log file; closed in ffmpeg_cleanup(). Presumably opened
   by the -vstats option handling elsewhere in this file — confirm. */
static FILE *vstats_file;
113 
/* Names of the variables available inside a -force_key_frames expression.
   NULL-terminated; presumably paired index-for-index with a value array
   passed to the expression evaluator elsewhere in this file — confirm
   before reordering. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
/* Snapshot of wall-clock/user/system time used by -benchmark reporting.
 * NOTE(review): the closing "} BenchmarkTimeStamps;" of this typedef
 * (original line 127) appears elided in this extract. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;  /* wall-clock time, microseconds */
    int64_t user_usec;  /* user CPU time, microseconds */
    int64_t sys_usec;   /* system CPU time, microseconds */

/* Forward declarations.
 * NOTE(review): neighbouring declarations (original lines 130, 132) are
 * elided in this extract. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getmaxrss(void);

static int run_as_daemon = 0;        /* non-interactive mode flag; its setter is elided here — confirm */
static int nb_frames_dup = 0;        /* running count of duplicated video frames */
static unsigned dup_warning = 1000;  /* threshold for the next duplication warning — presumably; user elided */
static int nb_frames_drop = 0;       /* running count of dropped video frames */
static int64_t decode_error_stat[2]; /* NOTE(review): users of this array are elided in this extract */

static int want_sdp = 1;

/* NOTE(review): several global declarations (original lines 142-158,
 * including the subtitle_out buffer used by do_subtitle_out()) are elided
 * in this extract. */

#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;  /* saved attrs, restored by term_exit_sigsafe() */
static int restore_tty;        /* non-zero once oldtty holds valid saved attrs */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* NOTE(review): the signature of this function (original line 176,
 * presumably "static int sub2video_get_blank_frame(InputStream *ist)") and
 * the pixel-format assignment (line 184) are elided in this extract.
 * Prepares a fresh, fully transparent canvas frame for sub2video rendering;
 * returns 0 on success or a negative AVERROR from av_frame_get_buffer(). */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    av_frame_unref(frame);
    /* Prefer the decoder's dimensions when known, else the configured sub2video size. */
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* Zero the first plane: blank/transparent canvas. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas frame, stamped with pts, into every
 * filtergraph input fed by this stream; records pts as last_pts. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);  /* canvas must have been allocated */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
        /* NOTE(review): the flag arguments closing this call (original
         * lines 232-233) are elided in this extract — confirm against the
         * upstream source. */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
/* Render the given subtitle onto the sub2video canvas and push the result
 * into the filtergraphs. With sub == NULL, emits a blank (clearing) frame
 * instead, using the heartbeat pts or the previous subtitle's end time. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;  /* NOTE(review): likely meant to be uint8_t (matches sub2video_copy_rect) — possible transcription artifact */
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;  /* sub2video not enabled for this stream */
    if (sub) {
        /* Convert display times (ms offsets from sub->pts, in AV_TIME_BASE)
           into this stream's time base. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, opening of this
         * statement (original line 267) is elided in this extract. */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Restore the terminal attributes saved by term_init(). Async-signal-safe
 * (calls only tcsetattr), so it may run from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335 
/* Public terminal-restore entry point used on normal exit paths.
 * NOTE(review): the call to term_exit_sigsafe() (original line 339)
 * appears elided in this extract. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
}
341 
static volatile int received_sigterm = 0;     /* last signal number recorded by sigterm_handler() */
static volatile int received_nb_signals = 0;  /* signal count; incremented in code elided from this extract (lines 353-354) — confirm */
/* NOTE(review): a declaration (original line 344) is elided here. */
static volatile int ffmpeg_exited = 0;        /* set at the end of ffmpeg_cleanup(); CtrlHandler() spins on it */
static int main_return_code = 0;              /* process exit code; set to 1 on mux write failure in write_packet() */
347 
/* Signal handler: record which signal arrived; after more than three
 * signals, hard-exit immediately.
 * NOTE(review): the parameter list (original line 349, presumably
 * "sigterm_handler(int sig)") and the received_nb_signals update /
 * term_exit_sigsafe() call (lines 353-354) are elided in this extract. */
static void
{
    int ret;
    received_sigterm = sig;
    if(received_nb_signals > 3) {
        /* av_log is not async-signal-safe; use a raw write() instead. */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
362 
363 #if HAVE_SETCONSOLECTRLHANDLER
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
365 {
366  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
367 
368  switch (fdwCtrlType)
369  {
370  case CTRL_C_EVENT:
371  case CTRL_BREAK_EVENT:
372  sigterm_handler(SIGINT);
373  return TRUE;
374 
375  case CTRL_CLOSE_EVENT:
376  case CTRL_LOGOFF_EVENT:
377  case CTRL_SHUTDOWN_EVENT:
378  sigterm_handler(SIGTERM);
379  /* Basically, with these 3 events, when we return from this method the
380  process is hard terminated, so stall as long as we need to
381  to try and let the main thread(s) clean up and gracefully terminate
382  (we have at most 5 seconds, but should be done far before that). */
383  while (!ffmpeg_exited) {
384  Sleep(0);
385  }
386  return TRUE;
387 
388  default:
389  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
390  return FALSE;
391  }
392 }
393 #endif
394 
/* Set up terminal raw mode (for interactive key handling) and install
 * signal/console handlers.
 * NOTE(review): the line opening the tty-setup scope (original line 398,
 * presumably "if (!run_as_daemon && stdin_interaction) {") is elided in
 * this extract, which is why an apparently unmatched "}" appears before
 * the #endif below. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        oldtty = tty;      /* saved for term_exit_sigsafe() */
        restore_tty = 1;

        /* Raw-ish input: no echo, no canonical line buffering, 8-bit chars,
           one-byte blocking reads with no inter-byte timeout. */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                         |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
431 
432 /* read a key without blocking */
433 static int read_key(void)
434 {
435  unsigned char ch;
436 #if HAVE_TERMIOS_H
437  int n = 1;
438  struct timeval tv;
439  fd_set rfds;
440 
441  FD_ZERO(&rfds);
442  FD_SET(0, &rfds);
443  tv.tv_sec = 0;
444  tv.tv_usec = 0;
445  n = select(1, &rfds, NULL, NULL, &tv);
446  if (n > 0) {
447  n = read(0, &ch, 1);
448  if (n == 1)
449  return ch;
450 
451  return n;
452  }
453 #elif HAVE_KBHIT
454 # if HAVE_PEEKNAMEDPIPE
455  static int is_pipe;
456  static HANDLE input_handle;
457  DWORD dw, nchars;
458  if(!input_handle){
459  input_handle = GetStdHandle(STD_INPUT_HANDLE);
460  is_pipe = !GetConsoleMode(input_handle, &dw);
461  }
462 
463  if (is_pipe) {
464  /* When running under a GUI, you will end here. */
465  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
466  // input pipe may have been closed by the program that ran ffmpeg
467  return -1;
468  }
469  //Read it
470  if(nchars != 0) {
471  read(0, &ch, 1);
472  return ch;
473  }else{
474  return -1;
475  }
476  }
477 # endif
478  if(kbhit())
479  return(getch());
480 #endif
481  return -1;
482 }
483 
/* AVIOInterruptCallback for blocking libav* I/O: a non-zero return aborts
 * the pending operation.
 * NOTE(review): the body (original line 486, the return expression) is
 * elided in this extract. */
static int decode_interrupt_cb(void *ctx)
{
}
488 
490 
/* Global teardown (registered as the exit handler): frees filtergraphs and
 * their queued frames/subtitles, closes output and input files, frees all
 * per-stream state, and reports how the run ended.
 * NOTE(review): numerous original lines are elided in this extract; they
 * are marked inline below and the code is reproduced as-is otherwise. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free filtergraphs, draining queued frames and subtitles first. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): original line 502 elided here. */
        for (j = 0; j < fg->nb_inputs; j++) {
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): the av_fifo_generic_read(...) opening of
                     * this call (original line 514) is elided. */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                /* NOTE(review): original line 518 elided. */
            }
            /* NOTE(review): original line 520 elided. */
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* NOTE(review): original line 539 elided. */

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NOTE(review): the declaration of "s" (original line 544) is
         * elided here. */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        /* NOTE(review): original line 550 elided. */
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        /* NOTE(review): original line 565 elided. */
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        /* NOTE(review): original line 570 elided. */
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        /* NOTE(review): original line 574 elided. */
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        /* NOTE(review): original lines 580-581 elided. */

        if (ost->muxing_queue) {
            /* Unref packets still buffered for a muxer that never wrote
               its header. */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            /* NOTE(review): original line 589 elided. */
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): original lines 604-605 and 607-608 elided around
         * here. */
        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        /* NOTE(review): original line 613 elided. */

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log(...) opening of this statement
             * (original line 620) is elided. */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }
    /* NOTE(review): original line 624 elided. */

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    /* NOTE(review): original line 633 elided. */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
644 
/* NOTE(review): the signature (original line 645, presumably
 * "static void remove_avoptions(AVDictionary **a, AVDictionary *b)") and
 * the loop body (line 650, presumably the av_dict_set() removal call) are
 * elided in this extract. Iterates the entries of b; presumably removes
 * each matching key from a — confirm against upstream. */
{
    AVDictionaryEntry *t = NULL;

    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
        /* NOTE(review): loop body elided (original line 650). */
    }
}
653 
/* NOTE(review): the signature (original line 654, presumably
 * "static void assert_avoptions(AVDictionary *m)") and the declaration of
 * "t" (line 656) are elided in this extract. Aborts the program if any
 * entry remains in m, i.e. an option was not consumed by its target. */
{
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
662 
663 static void abort_codec_experimental(AVCodec *c, int encoder)
664 {
665  exit_program(1);
666 }
667 
/* When -benchmark_all is active, log the user/sys/real time spent since the
 * previous call, labelled with the printf-style message built from fmt.
 * NOTE(review): the snapshot declaration of "t" (original line 671) and
 * the av_log(...) opening (line 679) are elided in this extract. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        /* NOTE(review): declaration/initialization of "t" elided here. */
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            /* NOTE(review): av_log(...) call opening elided here. */
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.user_usec - current_time.user_usec,
            t.sys_usec - current_time.sys_usec,
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
688 
/* NOTE(review): the signature (original line 689, presumably
 * "static void close_all_output_streams(OutputStream *ost, OSTFinished
 * this_stream, OSTFinished others)") is elided in this extract.
 * Marks every output stream finished: "ost" itself with this_stream, all
 * other streams with others. */
{
    int i;
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost2 = output_streams[i];
        ost2->finished |= ost == ost2 ? this_stream : others;
    }
}
697 
/* Last stage before the muxer: enforce -frames limits, buffer packets until
 * the muxer header is written, sanitize timestamps, then submit the packet
 * via av_interleaved_write_frame(). The packet is always consumed.
 * NOTE(review): several original lines are elided in this extract and are
 * marked inline below. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue geometrically up to max_muxing_queue_size. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): the av_log(...) opening of this statement
                 * (original line 727) is elided. */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* NOTE(review): the condition guarding this assignment (original lines
     * 744-745) is elided in this extract. */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the av_packet_get_side_data(...) opening of this
         * statement, declaring "sd" (original line 750), is elided. */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* Replace both with the median of {pts, dts, last_mux_dts+1}:
               sum minus min minus max. */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the opening of this condition (original line 784)
         * is elided in this extract. */
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type argument (original line 819)
                * is elided in this extract. */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): original line 830 elided here. */
    }
    av_packet_unref(pkt);
}
834 
/* NOTE(review): the signature (original line 835, presumably
 * "static void close_output_stream(OutputStream *ost)") is elided in this
 * extract. Marks the encoder side of the stream finished; when the file's
 * "shortest" flag is set, clamps its recording time to this stream's
 * current end. */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
845 
/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
/* NOTE(review): the first line of the signature (original line 857,
 * presumably "static void output_packet(OutputFile *of, AVPacket *pkt,")
 * is elided in this extract. */
                         OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
        if (ret < 0)
            goto finish;

        eof = 0;
        idx = 1;
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                /* filter needs more input: step back up the chain */
                ret = 0;
                idx--;
                continue;
            } else if (ret == AVERROR_EOF) {
                eof = 1;
            } else if (ret < 0)
                goto finish;

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
                if (ret < 0)
                    goto finish;
                idx++;
                eof = 0;
            } else if (eof)
                goto finish;
            else
                write_packet(of, pkt, ost, 0);
        }
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
907 
/* NOTE(review): the signature (original line 908, presumably
 * "static int check_recording_time(OutputStream *ost)") and the first line
 * of the comparison condition (line 913, presumably an av_compare_ts(...)
 * call) are elided in this extract. Returns 0 (after closing the stream)
 * once the file's recording time limit is reached, 1 otherwise. */
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time != INT64_MAX &&
        AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;
    }
    return 1;
}
920 
/* Encode one audio frame on ost and mux every packet the encoder returns.
 * On encoder error the program is aborted.
 * NOTE(review): the first line of the signature (original line 921,
 * presumably "static void do_audio_out(OutputFile *of, OutputStream *ost,")
 * is elided in this extract. */
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* Maintain the running output pts, counted in samples. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain every packet currently available from the encoder. */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
980 
981 static void do_subtitle_out(OutputFile *of,
982  OutputStream *ost,
983  AVSubtitle *sub)
984 {
985  int subtitle_out_max_size = 1024 * 1024;
986  int subtitle_out_size, nb, i;
987  AVCodecContext *enc;
988  AVPacket pkt;
989  int64_t pts;
990 
991  if (sub->pts == AV_NOPTS_VALUE) {
992  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
993  if (exit_on_error)
994  exit_program(1);
995  return;
996  }
997 
998  enc = ost->enc_ctx;
999 
1000  if (!subtitle_out) {
1001  subtitle_out = av_malloc(subtitle_out_max_size);
1002  if (!subtitle_out) {
1003  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1004  exit_program(1);
1005  }
1006  }
1007 
1008  /* Note: DVB subtitle need one packet to draw them and one other
1009  packet to clear them */
1010  /* XXX: signal it in the codec context ? */
1011  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1012  nb = 2;
1013  else
1014  nb = 1;
1015 
1016  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1017  pts = sub->pts;
1018  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1019  pts -= output_files[ost->file_index]->start_time;
1020  for (i = 0; i < nb; i++) {
1021  unsigned save_num_rects = sub->num_rects;
1022 
1023  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1024  if (!check_recording_time(ost))
1025  return;
1026 
1027  sub->pts = pts;
1028  // start_display_time is required to be 0
1029  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1030  sub->end_display_time -= sub->start_display_time;
1031  sub->start_display_time = 0;
1032  if (i == 1)
1033  sub->num_rects = 0;
1034 
1035  ost->frames_encoded++;
1036 
1037  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1038  subtitle_out_max_size, sub);
1039  if (i == 1)
1040  sub->num_rects = save_num_rects;
1041  if (subtitle_out_size < 0) {
1042  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1043  exit_program(1);
1044  }
1045 
1046  av_init_packet(&pkt);
1047  pkt.data = subtitle_out;
1048  pkt.size = subtitle_out_size;
1049  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1050  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1051  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1052  /* XXX: the pts correction is handled here. Maybe handling
1053  it in the codec would be better */
1054  if (i == 0)
1055  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1056  else
1057  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1058  }
1059  pkt.dts = pkt.pts;
1060  output_packet(of, &pkt, ost, 0);
1061  }
1062 }
1063 
1064 static void do_video_out(OutputFile *of,
1065  OutputStream *ost,
1066  AVFrame *next_picture,
1067  double sync_ipts)
1068 {
1069  int ret, format_video_sync;
1070  AVPacket pkt;
1071  AVCodecContext *enc = ost->enc_ctx;
1072  AVCodecParameters *mux_par = ost->st->codecpar;
1073  AVRational frame_rate;
1074  int nb_frames, nb0_frames, i;
1075  double delta, delta0;
1076  double duration = 0;
1077  int frame_size = 0;
1078  InputStream *ist = NULL;
1080 
1081  if (ost->source_index >= 0)
1082  ist = input_streams[ost->source_index];
1083 
1084  frame_rate = av_buffersink_get_frame_rate(filter);
1085  if (frame_rate.num > 0 && frame_rate.den > 0)
1086  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1087 
1088  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1089  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1090 
1091  if (!ost->filters_script &&
1092  !ost->filters &&
1093  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1094  next_picture &&
1095  ist &&
1096  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1097  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1098  }
1099 
1100  if (!next_picture) {
1101  //end, flushing
1102  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1103  ost->last_nb0_frames[1],
1104  ost->last_nb0_frames[2]);
1105  } else {
1106  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1107  delta = delta0 + duration;
1108 
1109  /* by default, we output a single frame */
1110  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1111  nb_frames = 1;
1112 
1113  format_video_sync = video_sync_method;
1114  if (format_video_sync == VSYNC_AUTO) {
1115  if(!strcmp(of->ctx->oformat->name, "avi")) {
1116  format_video_sync = VSYNC_VFR;
1117  } else
1118  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1119  if ( ist
1120  && format_video_sync == VSYNC_CFR
1121  && input_files[ist->file_index]->ctx->nb_streams == 1
1122  && input_files[ist->file_index]->input_ts_offset == 0) {
1123  format_video_sync = VSYNC_VSCFR;
1124  }
1125  if (format_video_sync == VSYNC_CFR && copy_ts) {
1126  format_video_sync = VSYNC_VSCFR;
1127  }
1128  }
1129  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1130 
1131  if (delta0 < 0 &&
1132  delta > 0 &&
1133  format_video_sync != VSYNC_PASSTHROUGH &&
1134  format_video_sync != VSYNC_DROP) {
1135  if (delta0 < -0.6) {
1136  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1137  } else
1138  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1139  sync_ipts = ost->sync_opts;
1140  duration += delta0;
1141  delta0 = 0;
1142  }
1143 
1144  switch (format_video_sync) {
1145  case VSYNC_VSCFR:
1146  if (ost->frame_number == 0 && delta0 >= 0.5) {
1147  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1148  delta = duration;
1149  delta0 = 0;
1150  ost->sync_opts = lrint(sync_ipts);
1151  }
1152  case VSYNC_CFR:
1153  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1154  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1155  nb_frames = 0;
1156  } else if (delta < -1.1)
1157  nb_frames = 0;
1158  else if (delta > 1.1) {
1159  nb_frames = lrintf(delta);
1160  if (delta0 > 1.1)
1161  nb0_frames = lrintf(delta0 - 0.6);
1162  }
1163  break;
1164  case VSYNC_VFR:
1165  if (delta <= -0.6)
1166  nb_frames = 0;
1167  else if (delta > 0.6)
1168  ost->sync_opts = lrint(sync_ipts);
1169  break;
1170  case VSYNC_DROP:
1171  case VSYNC_PASSTHROUGH:
1172  ost->sync_opts = lrint(sync_ipts);
1173  break;
1174  default:
1175  av_assert0(0);
1176  }
1177  }
1178 
1179  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1180  nb0_frames = FFMIN(nb0_frames, nb_frames);
1181 
1182  memmove(ost->last_nb0_frames + 1,
1183  ost->last_nb0_frames,
1184  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1185  ost->last_nb0_frames[0] = nb0_frames;
1186 
1187  if (nb0_frames == 0 && ost->last_dropped) {
1188  nb_frames_drop++;
1190  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1191  ost->frame_number, ost->st->index, ost->last_frame->pts);
1192  }
1193  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1194  if (nb_frames > dts_error_threshold * 30) {
1195  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1196  nb_frames_drop++;
1197  return;
1198  }
1199  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1200  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1201  if (nb_frames_dup > dup_warning) {
1202  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1203  dup_warning *= 10;
1204  }
1205  }
1206  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1207 
1208  /* duplicates frame if needed */
1209  for (i = 0; i < nb_frames; i++) {
1210  AVFrame *in_picture;
1211  int forced_keyframe = 0;
1212  double pts_time;
1213  av_init_packet(&pkt);
1214  pkt.data = NULL;
1215  pkt.size = 0;
1216 
1217  if (i < nb0_frames && ost->last_frame) {
1218  in_picture = ost->last_frame;
1219  } else
1220  in_picture = next_picture;
1221 
1222  if (!in_picture)
1223  return;
1224 
1225  in_picture->pts = ost->sync_opts;
1226 
1227  if (!check_recording_time(ost))
1228  return;
1229 
1231  ost->top_field_first >= 0)
1232  in_picture->top_field_first = !!ost->top_field_first;
1233 
1234  if (in_picture->interlaced_frame) {
1235  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1236  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1237  else
1238  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1239  } else
1240  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1241 
1242  in_picture->quality = enc->global_quality;
1243  in_picture->pict_type = 0;
1244 
1245  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1246  in_picture->pts != AV_NOPTS_VALUE)
1247  ost->forced_kf_ref_pts = in_picture->pts;
1248 
1249  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1250  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1251  if (ost->forced_kf_index < ost->forced_kf_count &&
1252  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1253  ost->forced_kf_index++;
1254  forced_keyframe = 1;
1255  } else if (ost->forced_keyframes_pexpr) {
1256  double res;
1257  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1260  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1266  res);
1267  if (res) {
1268  forced_keyframe = 1;
1274  }
1275 
1277  } else if ( ost->forced_keyframes
1278  && !strncmp(ost->forced_keyframes, "source", 6)
1279  && in_picture->key_frame==1
1280  && !i) {
1281  forced_keyframe = 1;
1282  }
1283 
1284  if (forced_keyframe) {
1285  in_picture->pict_type = AV_PICTURE_TYPE_I;
1286  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1287  }
1288 
1290  if (debug_ts) {
1291  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1292  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1293  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1294  enc->time_base.num, enc->time_base.den);
1295  }
1296 
1297  ost->frames_encoded++;
1298 
1299  ret = avcodec_send_frame(enc, in_picture);
1300  if (ret < 0)
1301  goto error;
1302  // Make sure Closed Captions will not be duplicated
1304 
1305  while (1) {
1306  ret = avcodec_receive_packet(enc, &pkt);
1307  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1308  if (ret == AVERROR(EAGAIN))
1309  break;
1310  if (ret < 0)
1311  goto error;
1312 
1313  if (debug_ts) {
1314  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1315  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1316  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1317  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1318  }
1319 
1320  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1321  pkt.pts = ost->sync_opts;
1322 
1323  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1324 
1325  if (debug_ts) {
1326  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1327  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1328  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1329  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1330  }
1331 
1332  frame_size = pkt.size;
1333  output_packet(of, &pkt, ost, 0);
1334 
1335  /* if two pass, output log */
1336  if (ost->logfile && enc->stats_out) {
1337  fprintf(ost->logfile, "%s", enc->stats_out);
1338  }
1339  }
1340  ost->sync_opts++;
1341  /*
1342  * For video, number of frames in == number of packets out.
1343  * But there may be reordering, so we can't throw away frames on encoder
1344  * flush, we need to limit them here, before they go into encoder.
1345  */
1346  ost->frame_number++;
1347 
1348  if (vstats_filename && frame_size)
1349  do_video_stats(ost, frame_size);
1350  }
1351 
1352  if (!ost->last_frame)
1353  ost->last_frame = av_frame_alloc();
1354  av_frame_unref(ost->last_frame);
1355  if (next_picture && ost->last_frame)
1356  av_frame_ref(ost->last_frame, next_picture);
1357  else
1358  av_frame_free(&ost->last_frame);
1359 
1360  return;
1361 error:
1362  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1363  exit_program(1);
1364 }
1365 
1366 static double psnr(double d)
1367 {
1368  return -10.0 * log10(d);
1369 }
1370 
/* Append one line of per-frame statistics for a video output stream to the
 * file named by -vstats / -vstats_file.  The stats file is opened lazily on
 * the first call; the program aborts if it cannot be created.
 * NOTE(review): the function's signature line is missing from this extract
 * (presumably do_video_stats(OutputStream *ost, int frame_size) — confirm
 * against the full source); only the body is visible here. */
1372 {
1373  AVCodecContext *enc;
1374  int frame_number;
1375  double ti1, bitrate, avg_bitrate;
1376 
1377  /* this is executed just the first time do_video_stats is called */
1378  if (!vstats_file) {
1379  vstats_file = fopen(vstats_filename, "w");
1380  if (!vstats_file) {
1381  perror("fopen");
1382  exit_program(1);
1383  }
1384  }
1385 
1386  enc = ost->enc_ctx;
1387  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1388  frame_number = ost->st->nb_frames;
       /* -vstats_version selects the legacy line format (v1) or the extended
        * one that also prints the output-file and stream indices. */
1389  if (vstats_version <= 1) {
1390  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1391  ost->quality / (float)FF_QP2LAMBDA);
1392  } else {
1393  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1394  ost->quality / (float)FF_QP2LAMBDA);
1395  }
1396 
1397  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1398  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1399 
1400  fprintf(vstats_file,"f_size= %6d ", frame_size);
1401  /* compute pts value */
1402  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
       /* clamp to avoid division by (near) zero in the average bitrate below */
1403  if (ti1 < 0.01)
1404  ti1 = 0.01;
1405 
1406  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1407  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1408  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1409  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1410  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1411  }
1412 }
1413 
1414 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1415 
/* Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature line and the statement that marks the stream
 * itself finished (original line 1421) are missing from this extract —
 * confirm against the full source. */
1417 {
1418  OutputFile *of = output_files[ost->file_index];
1419  int i;
1420 
1422 
1423  if (of->shortest) {
1424  for (i = 0; i < of->ctx->nb_streams; i++)
1425  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1426  }
1427 }
1428 
1429 /**
1430  * Get and encode new output from any of the filtergraphs, without causing
1431  * activity.
1432  *
1433  * @return 0 for success, <0 for severe errors
1434  */
/* Drain every configured buffersink and feed the frames to the encoders.
 * When @flush is set, an EOF from a video sink triggers a final
 * do_video_out(NULL) so the encoder's duplication logic can flush.
 * NOTE(review): several source lines are missing from this extract (the
 * AVFilterContext declaration, the flags argument of
 * av_buffersink_get_frame_flags(), and a couple of av_log() opener lines). */
1435 static int reap_filters(int flush)
1436 {
1437  AVFrame *filtered_frame = NULL;
1438  int i;
1439 
1440  /* Reap all buffers present in the buffer sinks */
1441  for (i = 0; i < nb_output_streams; i++) {
1442  OutputStream *ost = output_streams[i];
1443  OutputFile *of = output_files[ost->file_index];
1445  AVCodecContext *enc = ost->enc_ctx;
1446  int ret = 0;
1447 
1448  if (!ost->filter || !ost->filter->graph->graph)
1449  continue;
1450  filter = ost->filter->filter;
1451 
       /* lazily initialize the output stream the first time a frame for it
        * comes out of the graph; failure is fatal */
1452  if (!ost->initialized) {
1453  char error[1024] = "";
1454  ret = init_output_stream(ost, error, sizeof(error));
1455  if (ret < 0) {
1456  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1457  ost->file_index, ost->index, error);
1458  exit_program(1);
1459  }
1460  }
1461 
1462  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1463  return AVERROR(ENOMEM);
1464  }
1465  filtered_frame = ost->filtered_frame;
1466 
1467  while (1) {
1468  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1469  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1471  if (ret < 0) {
1472  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1474  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1475  } else if (flush && ret == AVERROR_EOF) {
1477  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1478  }
1479  break;
1480  }
1481  if (ost->finished) {
1482  av_frame_unref(filtered_frame);
1483  continue;
1484  }
       /* rescale the frame PTS from the filter timebase to the encoder
        * timebase, keeping a higher-precision float copy; the extra bits
        * widen the denominator to reduce rounding error before dividing back */
1485  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1486  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1487  AVRational filter_tb = av_buffersink_get_time_base(filter);
1488  AVRational tb = enc->time_base;
1489  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1490 
1491  tb.den <<= extra_bits;
1492  float_pts =
1493  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1494  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1495  float_pts /= 1 << extra_bits;
1496  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1497  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1498 
1499  filtered_frame->pts =
1500  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1501  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1502  }
1503 
1504  switch (av_buffersink_get_type(filter)) {
1505  case AVMEDIA_TYPE_VIDEO:
1506  if (!ost->frame_aspect_ratio.num)
1507  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1508 
1509  if (debug_ts) {
1510  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1511  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1512  float_pts,
1513  enc->time_base.num, enc->time_base.den);
1514  }
1515 
1516  do_video_out(of, ost, filtered_frame, float_pts);
1517  break;
1518  case AVMEDIA_TYPE_AUDIO:
1519  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1520  enc->channels != filtered_frame->channels) {
1522  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1523  break;
1524  }
1525  do_audio_out(of, ost, filtered_frame);
1526  break;
1527  default:
1528  // TODO support subtitle filters
1529  av_assert0(0);
1530  }
1531 
1532  av_frame_unref(filtered_frame);
1533  }
1534  }
1535 
1536  return 0;
1537 }
1538 
/* Print the end-of-run summary: per-type byte totals and muxing overhead at
 * INFO level, then verbose per-input/per-output stream statistics, and a
 * warning when nothing at all was encoded.
 * NOTE(review): the lines that detect a two-pass first pass (original lines
 * 1558-1559, which clear pass1_used) are missing from this extract. */
1539 static void print_final_stats(int64_t total_size)
1540 {
1541  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1542  uint64_t subtitle_size = 0;
1543  uint64_t data_size = 0;
1544  float percent = -1.0;
1545  int i, j;
1546  int pass1_used = 1;
1547 
       /* accumulate payload bytes per media type across all output streams */
1548  for (i = 0; i < nb_output_streams; i++) {
1549  OutputStream *ost = output_streams[i];
1550  switch (ost->enc_ctx->codec_type) {
1551  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1552  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1553  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1554  default: other_size += ost->data_size; break;
1555  }
1556  extra_size += ost->enc_ctx->extradata_size;
1557  data_size += ost->data_size;
1560  pass1_used = 0;
1561  }
1562 
       /* muxing overhead = container bytes beyond the raw payload */
1563  if (data_size && total_size>0 && total_size >= data_size)
1564  percent = 100.0 * (total_size - data_size) / data_size;
1565 
1566  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1567  video_size / 1024.0,
1568  audio_size / 1024.0,
1569  subtitle_size / 1024.0,
1570  other_size / 1024.0,
1571  extra_size / 1024.0);
1572  if (percent >= 0.0)
1573  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1574  else
1575  av_log(NULL, AV_LOG_INFO, "unknown");
1576  av_log(NULL, AV_LOG_INFO, "\n");
1577 
1578  /* print verbose per-stream stats */
1579  for (i = 0; i < nb_input_files; i++) {
1580  InputFile *f = input_files[i];
1581  uint64_t total_packets = 0, total_size = 0;
1582 
1583  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1584  i, f->ctx->url);
1585 
1586  for (j = 0; j < f->nb_streams; j++) {
1587  InputStream *ist = input_streams[f->ist_index + j];
1588  enum AVMediaType type = ist->dec_ctx->codec_type;
1589 
1590  total_size += ist->data_size;
1591  total_packets += ist->nb_packets;
1592 
1593  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1594  i, j, media_type_string(type));
1595  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1596  ist->nb_packets, ist->data_size);
1597 
1598  if (ist->decoding_needed) {
1599  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1600  ist->frames_decoded);
1601  if (type == AVMEDIA_TYPE_AUDIO)
1602  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1603  av_log(NULL, AV_LOG_VERBOSE, "; ");
1604  }
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, "\n");
1607  }
1608 
1609  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1610  total_packets, total_size);
1611  }
1612 
1613  for (i = 0; i < nb_output_files; i++) {
1614  OutputFile *of = output_files[i];
1615  uint64_t total_packets = 0, total_size = 0;
1616 
1617  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1618  i, of->ctx->url);
1619 
1620  for (j = 0; j < of->ctx->nb_streams; j++) {
1621  OutputStream *ost = output_streams[of->ost_index + j];
1622  enum AVMediaType type = ost->enc_ctx->codec_type;
1623 
1624  total_size += ost->data_size;
1625  total_packets += ost->packets_written;
1626 
1627  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1628  i, j, media_type_string(type));
1629  if (ost->encoding_needed) {
1630  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1631  ost->frames_encoded);
1632  if (type == AVMEDIA_TYPE_AUDIO)
1633  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1634  av_log(NULL, AV_LOG_VERBOSE, "; ");
1635  }
1636 
1637  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1638  ost->packets_written, ost->data_size);
1639 
1640  av_log(NULL, AV_LOG_VERBOSE, "\n");
1641  }
1642 
1643  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1644  total_packets, total_size);
1645  }
1646  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1647  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1648  if (pass1_used) {
1649  av_log(NULL, AV_LOG_WARNING, "\n");
1650  } else {
1651  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1652  }
1653  }
1654 }
1655 
/* Build and emit the periodic status line ("frame= ... fps= ... bitrate=")
 * and, when -progress is active, the machine-readable key=value report.
 * Non-final reports are rate-limited to one every 500ms using static state,
 * so this function is not reentrant.  On the last report it also triggers
 * print_final_stats().
 * NOTE(review): a few source lines are missing from this extract (the
 * av_bprint_init() of `buf`, the guard around av_stream_get_end_pts(), and
 * two av_log/av_bprintf opener lines) — consult the full source. */
1656 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1657 {
1658  AVBPrint buf, buf_script;
1659  OutputStream *ost;
1660  AVFormatContext *oc;
1661  int64_t total_size;
1662  AVCodecContext *enc;
1663  int frame_number, vid, i;
1664  double bitrate;
1665  double speed;
1666  int64_t pts = INT64_MIN + 1;
1667  static int64_t last_time = -1;
1668  static int qp_histogram[52];
1669  int hours, mins, secs, us;
1670  const char *hours_sign;
1671  int ret;
1672  float t;
1673 
1674  if (!print_stats && !is_last_report && !progress_avio)
1675  return;
1676 
       /* throttle intermediate reports to at most one per 500ms */
1677  if (!is_last_report) {
1678  if (last_time == -1) {
1679  last_time = cur_time;
1680  return;
1681  }
1682  if ((cur_time - last_time) < 500000)
1683  return;
1684  last_time = cur_time;
1685  }
1686 
1687  t = (cur_time-timer_start) / 1000000.0;
1688 
1689 
1690  oc = output_files[0]->ctx;
1691 
1692  total_size = avio_size(oc->pb);
1693  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1694  total_size = avio_tell(oc->pb);
1695 
1696  vid = 0;
1698  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1699  for (i = 0; i < nb_output_streams; i++) {
1700  float q = -1;
1701  ost = output_streams[i];
1702  enc = ost->enc_ctx;
1703  if (!ost->stream_copy)
1704  q = ost->quality / (float) FF_QP2LAMBDA;
1705 
       /* only the first video stream drives the frame/fps fields; any
        * further video streams just append their quantizer */
1706  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1707  av_bprintf(&buf, "q=%2.1f ", q);
1708  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1709  ost->file_index, ost->index, q);
1710  }
1711  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1712  float fps;
1713 
1714  frame_number = ost->frame_number;
1715  fps = t > 1 ? frame_number / t : 0;
1716  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1717  frame_number, fps < 9.95, fps, q);
1718  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1719  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1720  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1721  ost->file_index, ost->index, q);
1722  if (is_last_report)
1723  av_bprintf(&buf, "L");
1724  if (qp_hist) {
1725  int j;
1726  int qp = lrintf(q);
1727  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1728  qp_histogram[qp]++;
1729  for (j = 0; j < 32; j++)
1730  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1731  }
1732 
1733  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1734  int j;
1735  double error, error_sum = 0;
1736  double scale, scale_sum = 0;
1737  double p;
1738  char type[3] = { 'Y','U','V' };
1739  av_bprintf(&buf, "PSNR=");
1740  for (j = 0; j < 3; j++) {
1741  if (is_last_report) {
1742  error = enc->error[j];
1743  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1744  } else {
1745  error = ost->error[j];
1746  scale = enc->width * enc->height * 255.0 * 255.0;
1747  }
                /* chroma planes are subsampled: quarter the scale */
1748  if (j)
1749  scale /= 4;
1750  error_sum += error;
1751  scale_sum += scale;
1752  p = psnr(error / scale);
1753  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1754  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1755  ost->file_index, ost->index, type[j] | 32, p);
1756  }
1757  p = psnr(error_sum / scale_sum);
1758  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1759  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1760  ost->file_index, ost->index, p);
1761  }
1762  vid = 1;
1763  }
1764  /* compute min output value */
1766  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1767  ost->st->time_base, AV_TIME_BASE_Q));
1768  if (is_last_report)
1769  nb_frames_drop += ost->last_dropped;
1770  }
1771 
       /* split |pts| (microseconds) into H:MM:SS.xx components */
1772  secs = FFABS(pts) / AV_TIME_BASE;
1773  us = FFABS(pts) % AV_TIME_BASE;
1774  mins = secs / 60;
1775  secs %= 60;
1776  hours = mins / 60;
1777  mins %= 60;
1778  hours_sign = (pts < 0) ? "-" : "";
1779 
1780  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1781  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1782 
1783  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1784  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1785  if (pts == AV_NOPTS_VALUE) {
1786  av_bprintf(&buf, "N/A ");
1787  } else {
1788  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1789  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1790  }
1791 
1792  if (bitrate < 0) {
1793  av_bprintf(&buf, "bitrate=N/A");
1794  av_bprintf(&buf_script, "bitrate=N/A\n");
1795  }else{
1796  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1797  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1798  }
1799 
1800  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1801  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1802  if (pts == AV_NOPTS_VALUE) {
1803  av_bprintf(&buf_script, "out_time_us=N/A\n");
1804  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1805  av_bprintf(&buf_script, "out_time=N/A\n");
1806  } else {
1807  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1808  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1809  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1810  hours_sign, hours, mins, secs, us);
1811  }
1812 
1814  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1815  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1816  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1817 
1818  if (speed < 0) {
1819  av_bprintf(&buf, " speed=N/A");
1820  av_bprintf(&buf_script, "speed=N/A\n");
1821  } else {
1822  av_bprintf(&buf, " speed=%4.3gx", speed);
1823  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1824  }
1825 
1826  if (print_stats || is_last_report) {
       /* '\r' keeps intermediate reports on one console line; '\n' ends it */
1827  const char end = is_last_report ? '\n' : '\r';
1828  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1829  fprintf(stderr, "%s %c", buf.str, end);
1830  } else
1831  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1832 
1833  fflush(stderr);
1834  }
1835  av_bprint_finalize(&buf, NULL);
1836 
1837  if (progress_avio) {
1838  av_bprintf(&buf_script, "progress=%s\n",
1839  is_last_report ? "end" : "continue");
1840  avio_write(progress_avio, buf_script.str,
1841  FFMIN(buf_script.len, buf_script.size - 1));
1842  avio_flush(progress_avio);
1843  av_bprint_finalize(&buf_script, NULL);
1844  if (is_last_report) {
1845  if ((ret = avio_closep(&progress_avio)) < 0)
1847  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1848  }
1849  }
1850 
1851  if (is_last_report)
1852  print_final_stats(total_size);
1853 }
1854 
/* Seed an InputFilter's media parameters from stream codec parameters when
 * no decoded frame was ever available to derive them from.
 * NOTE(review): the signature line is missing from this extract; callers
 * below invoke it as ifilter_parameters_from_codecpar(ifilter, codecpar). */
1856 {
1857  // We never got any input. Set a fake format, which will
1858  // come from libavformat.
1859  ifilter->format = par->format;
1860  ifilter->sample_rate = par->sample_rate;
1861  ifilter->channels = par->channels;
1862  ifilter->channel_layout = par->channel_layout;
1863  ifilter->width = par->width;
1864  ifilter->height = par->height;
1865  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1866 }
1867 
/* Flush every encoder at end of input: streams that never produced data get
 * their filtergraph configured from codec parameters and are initialized
 * empty, then each encoder is drained (send NULL frame, receive packets
 * until EOF) and the remaining packets are muxed.
 * NOTE(review): several source lines are missing from this extract (two
 * av_log() opener lines, a guard before `continue` at original line 1898,
 * the EOF-check before draining at 1921, and the update_benchmark() call at
 * 1944) — consult the full source before editing. */
1868 static void flush_encoders(void)
1869 {
1870  int i, ret;
1871 
1872  for (i = 0; i < nb_output_streams; i++) {
1873  OutputStream *ost = output_streams[i];
1874  AVCodecContext *enc = ost->enc_ctx;
1875  OutputFile *of = output_files[ost->file_index];
1876 
1877  if (!ost->encoding_needed)
1878  continue;
1879 
1880  // Try to enable encoding with no input frames.
1881  // Maybe we should just let encoding fail instead.
1882  if (!ost->initialized) {
1883  FilterGraph *fg = ost->filter->graph;
1884  char error[1024] = "";
1885 
1887  "Finishing stream %d:%d without any data written to it.\n",
1888  ost->file_index, ost->st->index);
1889 
       /* the graph was never configured: fall back to codecpar-derived
        * input parameters so it can be configured now */
1890  if (ost->filter && !fg->graph) {
1891  int x;
1892  for (x = 0; x < fg->nb_inputs; x++) {
1893  InputFilter *ifilter = fg->inputs[x];
1894  if (ifilter->format < 0)
1895  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1896  }
1897 
1899  continue;
1900 
1901  ret = configure_filtergraph(fg);
1902  if (ret < 0) {
1903  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1904  exit_program(1);
1905  }
1906 
1907  finish_output_stream(ost);
1908  }
1909 
1910  ret = init_output_stream(ost, error, sizeof(error));
1911  if (ret < 0) {
1912  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1913  ost->file_index, ost->index, error);
1914  exit_program(1);
1915  }
1916  }
1917 
       /* raw/PCM-like audio encoders (frame_size <= 1) have nothing buffered */
1918  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1919  continue;
1920 
1922  continue;
1923 
1924  for (;;) {
1925  const char *desc = NULL;
1926  AVPacket pkt;
1927  int pkt_size;
1928 
1929  switch (enc->codec_type) {
1930  case AVMEDIA_TYPE_AUDIO:
1931  desc = "audio";
1932  break;
1933  case AVMEDIA_TYPE_VIDEO:
1934  desc = "video";
1935  break;
1936  default:
1937  av_assert0(0);
1938  }
1939 
1940  av_init_packet(&pkt);
1941  pkt.data = NULL;
1942  pkt.size = 0;
1943 
1945 
       /* enter draining mode: keep sending NULL frames while the encoder
        * still asks for input */
1946  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1947  ret = avcodec_send_frame(enc, NULL);
1948  if (ret < 0) {
1949  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1950  desc,
1951  av_err2str(ret));
1952  exit_program(1);
1953  }
1954  }
1955 
1956  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1957  if (ret < 0 && ret != AVERROR_EOF) {
1958  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1959  desc,
1960  av_err2str(ret));
1961  exit_program(1);
1962  }
1963  if (ost->logfile && enc->stats_out) {
1964  fprintf(ost->logfile, "%s", enc->stats_out);
1965  }
1966  if (ret == AVERROR_EOF) {
1967  output_packet(of, &pkt, ost, 1);
1968  break;
1969  }
1970  if (ost->finished & MUXER_FINISHED) {
1971  av_packet_unref(&pkt);
1972  continue;
1973  }
1974  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1975  pkt_size = pkt.size;
1976  output_packet(of, &pkt, ost, 0);
1978  do_video_stats(ost, pkt_size);
1979  }
1980  }
1981  }
1982 }
1983 
1984 /*
1985  * Check whether a packet from ist should be written into ost at this time
1986  */
/* Return 1 if a packet from this input stream should currently be written
 * into this output stream, 0 otherwise (wrong source, stream already
 * finished, or still before the output file's -ss start time).
 * NOTE(review): the signature line is missing from this extract; the body
 * reads an InputStream *ist and an OutputStream *ost. */
1988 {
1989  OutputFile *of = output_files[ost->file_index];
1990  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1991 
1992  if (ost->source_index != ist_index)
1993  return 0;
1994 
1995  if (ost->finished)
1996  return 0;
1997 
1998  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1999  return 0;
2000 
2001  return 1;
2002 }
2003 
/* Copy one input packet to an output stream without re-encoding: apply the
 * start-time offset, enforce recording-time limits, rescale timestamps to
 * the muxer timebase and hand the packet to output_packet().  A NULL packet
 * means EOF and flushes the output bitstream filters.
 * NOTE(review): a few source lines are missing from this extract (part of
 * the leading-keyframe condition at original line 2022 and the duration /
 * rescale-flags lines around 2064 and 2069). */
2004 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2005 {
2006  OutputFile *of = output_files[ost->file_index];
2007  InputFile *f = input_files [ist->file_index];
2008  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2009  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2010  AVPacket opkt;
2011 
2012  // EOF: flush output bitstream filters.
2013  if (!pkt) {
2014  av_init_packet(&opkt);
2015  opkt.data = NULL;
2016  opkt.size = 0;
2017  output_packet(of, &opkt, ost, 1);
2018  return;
2019  }
2020 
2021  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2023  return;
2024 
       /* drop packets dated before the copy start point unless
        * -copypriorss asked to keep them */
2025  if (!ost->frame_number && !ost->copy_prior_start) {
2026  int64_t comp_start = start_time;
2027  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2028  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2029  if (pkt->pts == AV_NOPTS_VALUE ?
2030  ist->pts < comp_start :
2031  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2032  return;
2033  }
2034 
2035  if (of->recording_time != INT64_MAX &&
2036  ist->pts >= of->recording_time + start_time) {
2037  close_output_stream(ost);
2038  return;
2039  }
2040 
2041  if (f->recording_time != INT64_MAX) {
2042  start_time = f->ctx->start_time;
2043  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2044  start_time += f->start_time;
2045  if (ist->pts >= f->recording_time + start_time) {
2046  close_output_stream(ost);
2047  return;
2048  }
2049  }
2050 
2051  /* force the input stream PTS */
2052  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2053  ost->sync_opts++;
2054 
2055  if (av_packet_ref(&opkt, pkt) < 0)
2056  exit_program(1);
2057 
2058  if (pkt->pts != AV_NOPTS_VALUE)
2059  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2060 
2061  if (pkt->dts == AV_NOPTS_VALUE) {
2062  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2063  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2065  if(!duration)
2066  duration = ist->dec_ctx->frame_size;
2067  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2068  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2070  /* dts will be set immediately afterwards to what pts is now */
2071  opkt.pts = opkt.dts - ost_tb_start_time;
2072  } else
2073  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2074  opkt.dts -= ost_tb_start_time;
2075 
2076  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2077 
2078  output_packet(of, &opkt, ost, 0);
2079 }
2080 
/* Try to fill in a missing channel layout on the input decoder context.
 * Returns 1 on success (or if a layout was already set), 0 when guessing is
 * not possible (too many channels, or no guess available).
 * NOTE(review): the signature line and the line that performs the actual
 * guess (original line 2090, which sets dec->channel_layout) are missing
 * from this extract. */
2082 {
2083  AVCodecContext *dec = ist->dec_ctx;
2084 
2085  if (!dec->channel_layout) {
2086  char layout_name[256];
2087 
       /* -guess_layout_max caps how many channels we are willing to guess for */
2088  if (dec->channels > ist->guess_layout_max)
2089  return 0;
2091  if (!dec->channel_layout)
2092  return 0;
2093  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2094  dec->channels, dec->channel_layout);
2095  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2096  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2097  }
2098  return 1;
2099 }
2100 
/* Bookkeeping after a decode attempt: count successes/failures for the
 * -xerror accounting and abort on error when -xerror is set; also warn (or
 * abort) on corrupt decoded frames.
 * NOTE(review): the condition that detects a corrupt frame (original lines
 * 2110-2111, guarding the warning below) is missing from this extract. */
2101 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2102 {
2103  if (*got_output || ret<0)
2104  decode_error_stat[ret<0] ++;
2105 
2106  if (ret < 0 && exit_on_error)
2107  exit_program(1);
2108 
2109  if (*got_output && ist) {
2112  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2113  if (exit_on_error)
2114  exit_program(1);
2115  }
2116  }
2117 }
2118 
2119 // Filters can be configured only if the formats of all inputs are known.
2121 {
2122  int i;
2123  for (i = 0; i < fg->nb_inputs; i++) {
2124  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2125  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2126  return 0;
2127  }
2128  return 1;
2129 }
2130 
/* Push one decoded frame into a filtergraph input.  If the frame's
 * parameters differ from what the graph was configured for (format, size,
 * sample rate, channels/layout, hw frames context), the graph is
 * reconfigured first; frames arriving before all inputs have known formats
 * are buffered in the input's FIFO instead.
 * NOTE(review): the signature line and the av_buffersrc_add_frame_flags()
 * call (original line 2198, whose return value is checked below) are
 * missing from this extract. */
2132 {
2133  FilterGraph *fg = ifilter->graph;
2134  int need_reinit, ret, i;
2135 
2136  /* determine if the parameters for this input changed */
2137  need_reinit = ifilter->format != frame->format;
2138 
2139  switch (ifilter->ist->st->codecpar->codec_type) {
2140  case AVMEDIA_TYPE_AUDIO:
2141  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2142  ifilter->channels != frame->channels ||
2143  ifilter->channel_layout != frame->channel_layout;
2144  break;
2145  case AVMEDIA_TYPE_VIDEO:
2146  need_reinit |= ifilter->width != frame->width ||
2147  ifilter->height != frame->height;
2148  break;
2149  }
2150 
       /* -reinit_filter 0 keeps the existing graph despite parameter changes */
2151  if (!ifilter->ist->reinit_filters && fg->graph)
2152  need_reinit = 0;
2153 
2154  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2155  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2156  need_reinit = 1;
2157 
2158  if (need_reinit) {
2159  ret = ifilter_parameters_from_frame(ifilter, frame);
2160  if (ret < 0)
2161  return ret;
2162  }
2163 
2164  /* (re)init the graph if possible, otherwise buffer the frame and return */
2165  if (need_reinit || !fg->graph) {
2166  for (i = 0; i < fg->nb_inputs; i++) {
2167  if (!ifilter_has_all_input_formats(fg)) {
          /* clone the frame into this input's FIFO until all inputs are known;
           * the FIFO is grown on demand */
2168  AVFrame *tmp = av_frame_clone(frame);
2169  if (!tmp)
2170  return AVERROR(ENOMEM);
2171  av_frame_unref(frame);
2172 
2173  if (!av_fifo_space(ifilter->frame_queue)) {
2174  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2175  if (ret < 0) {
2176  av_frame_free(&tmp);
2177  return ret;
2178  }
2179  }
2180  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2181  return 0;
2182  }
2183  }
2184 
       /* drain the old graph before tearing it down and reconfiguring */
2185  ret = reap_filters(1);
2186  if (ret < 0 && ret != AVERROR_EOF) {
2187  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2188  return ret;
2189  }
2190 
2191  ret = configure_filtergraph(fg);
2192  if (ret < 0) {
2193  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2194  return ret;
2195  }
2196  }
2197 
2199  if (ret < 0) {
2200  if (ret != AVERROR_EOF)
2201  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2202  return ret;
2203  }
2204 
2205  return 0;
2206 }
2207 
2208 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2209 {
2210  int ret;
2211 
2212  ifilter->eof = 1;
2213 
2214  if (ifilter->filter) {
2215  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2216  if (ret < 0)
2217  return ret;
2218  } else {
2219  // the filtergraph was never configured
2220  if (ifilter->format < 0)
2221  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2222  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2223  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2224  return AVERROR_INVALIDDATA;
2225  }
2226  }
2227 
2228  return 0;
2229 }
2230 
2231 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2232 // There is the following difference: if you got a frame, you must call
2233 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2234 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2235 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2236 {
2237  int ret;
2238 
2239  *got_frame = 0;
2240 
2241  if (pkt) {
2242  ret = avcodec_send_packet(avctx, pkt);
2243  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2244  // decoded frames with avcodec_receive_frame() until done.
2245  if (ret < 0 && ret != AVERROR_EOF)
2246  return ret;
2247  }
2248 
2249  ret = avcodec_receive_frame(avctx, frame);
2250  if (ret < 0 && ret != AVERROR(EAGAIN))
2251  return ret;
2252  if (ret >= 0)
2253  *got_frame = 1;
2254 
2255  return 0;
2256 }
2257 
2258 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2259 {
2260  int i, ret;
2261  AVFrame *f;
2262 
2263  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2264  for (i = 0; i < ist->nb_filters; i++) {
2265  if (i < ist->nb_filters - 1) {
2266  f = ist->filter_frame;
2267  ret = av_frame_ref(f, decoded_frame);
2268  if (ret < 0)
2269  break;
2270  } else
2271  f = decoded_frame;
2272  ret = ifilter_send_frame(ist->filters[i], f);
2273  if (ret == AVERROR_EOF)
2274  ret = 0; /* ignore */
2275  if (ret < 0) {
2277  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2278  break;
2279  }
2280  }
2281  return ret;
2282 }
2283 
2284 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2285  int *decode_failed)
2286 {
2287  AVFrame *decoded_frame;
2288  AVCodecContext *avctx = ist->dec_ctx;
2289  int ret, err = 0;
2290  AVRational decoded_frame_tb;
2291 
2292  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2293  return AVERROR(ENOMEM);
2294  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2295  return AVERROR(ENOMEM);
2296  decoded_frame = ist->decoded_frame;
2297 
2299  ret = decode(avctx, decoded_frame, got_output, pkt);
2300  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2301  if (ret < 0)
2302  *decode_failed = 1;
2303 
2304  if (ret >= 0 && avctx->sample_rate <= 0) {
2305  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2306  ret = AVERROR_INVALIDDATA;
2307  }
2308 
2309  if (ret != AVERROR_EOF)
2310  check_decode_result(ist, got_output, ret);
2311 
2312  if (!*got_output || ret < 0)
2313  return ret;
2314 
2315  ist->samples_decoded += decoded_frame->nb_samples;
2316  ist->frames_decoded++;
2317 
2318  /* increment next_dts to use for the case where the input stream does not
2319  have timestamps or there are multiple frames in the packet */
2320  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2321  avctx->sample_rate;
2322  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2323  avctx->sample_rate;
2324 
2325  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2326  decoded_frame_tb = ist->st->time_base;
2327  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2328  decoded_frame->pts = pkt->pts;
2329  decoded_frame_tb = ist->st->time_base;
2330  }else {
2331  decoded_frame->pts = ist->dts;
2332  decoded_frame_tb = AV_TIME_BASE_Q;
2333  }
2334  if (decoded_frame->pts != AV_NOPTS_VALUE)
2335  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2336  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2337  (AVRational){1, avctx->sample_rate});
2338  ist->nb_samples = decoded_frame->nb_samples;
2339  err = send_frame_to_filters(ist, decoded_frame);
2340 
2342  av_frame_unref(decoded_frame);
2343  return err < 0 ? err : ret;
2344 }
2345 
2346 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2347  int *decode_failed)
2348 {
2349  AVFrame *decoded_frame;
2350  int i, ret = 0, err = 0;
2351  int64_t best_effort_timestamp;
2352  int64_t dts = AV_NOPTS_VALUE;
2353  AVPacket avpkt;
2354 
2355  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2356  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2357  // skip the packet.
2358  if (!eof && pkt && pkt->size == 0)
2359  return 0;
2360 
2361  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2362  return AVERROR(ENOMEM);
2363  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2364  return AVERROR(ENOMEM);
2365  decoded_frame = ist->decoded_frame;
2366  if (ist->dts != AV_NOPTS_VALUE)
2367  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2368  if (pkt) {
2369  avpkt = *pkt;
2370  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2371  }
2372 
2373  // The old code used to set dts on the drain packet, which does not work
2374  // with the new API anymore.
2375  if (eof) {
2376  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2377  if (!new)
2378  return AVERROR(ENOMEM);
2379  ist->dts_buffer = new;
2380  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2381  }
2382 
2384  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2385  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2386  if (ret < 0)
2387  *decode_failed = 1;
2388 
2389  // The following line may be required in some cases where there is no parser
2390  // or the parser does not has_b_frames correctly
2391  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2392  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2393  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2394  } else
2396  "video_delay is larger in decoder than demuxer %d > %d.\n"
2397  "If you want to help, upload a sample "
2398  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2399  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2400  ist->dec_ctx->has_b_frames,
2401  ist->st->codecpar->video_delay);
2402  }
2403 
2404  if (ret != AVERROR_EOF)
2405  check_decode_result(ist, got_output, ret);
2406 
2407  if (*got_output && ret >= 0) {
2408  if (ist->dec_ctx->width != decoded_frame->width ||
2409  ist->dec_ctx->height != decoded_frame->height ||
2410  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2411  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2412  decoded_frame->width,
2413  decoded_frame->height,
2414  decoded_frame->format,
2415  ist->dec_ctx->width,
2416  ist->dec_ctx->height,
2417  ist->dec_ctx->pix_fmt);
2418  }
2419  }
2420 
2421  if (!*got_output || ret < 0)
2422  return ret;
2423 
2424  if(ist->top_field_first>=0)
2425  decoded_frame->top_field_first = ist->top_field_first;
2426 
2427  ist->frames_decoded++;
2428 
2429  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2430  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2431  if (err < 0)
2432  goto fail;
2433  }
2434  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2435 
2436  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2437  *duration_pts = decoded_frame->pkt_duration;
2438 
2439  if (ist->framerate.num)
2440  best_effort_timestamp = ist->cfr_next_pts++;
2441 
2442  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2443  best_effort_timestamp = ist->dts_buffer[0];
2444 
2445  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2446  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2447  ist->nb_dts_buffer--;
2448  }
2449 
2450  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2451  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2452 
2453  if (ts != AV_NOPTS_VALUE)
2454  ist->next_pts = ist->pts = ts;
2455  }
2456 
2457  if (debug_ts) {
2458  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2459  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2460  ist->st->index, av_ts2str(decoded_frame->pts),
2461  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2462  best_effort_timestamp,
2463  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2464  decoded_frame->key_frame, decoded_frame->pict_type,
2465  ist->st->time_base.num, ist->st->time_base.den);
2466  }
2467 
2468  if (ist->st->sample_aspect_ratio.num)
2469  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2470 
2471  err = send_frame_to_filters(ist, decoded_frame);
2472 
2473 fail:
2475  av_frame_unref(decoded_frame);
2476  return err < 0 ? err : ret;
2477 }
2478 
2479 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2480  int *decode_failed)
2481 {
2482  AVSubtitle subtitle;
2483  int free_sub = 1;
2484  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2485  &subtitle, got_output, pkt);
2486 
2487  check_decode_result(NULL, got_output, ret);
2488 
2489  if (ret < 0 || !*got_output) {
2490  *decode_failed = 1;
2491  if (!pkt->size)
2492  sub2video_flush(ist);
2493  return ret;
2494  }
2495 
2496  if (ist->fix_sub_duration) {
2497  int end = 1;
2498  if (ist->prev_sub.got_output) {
2499  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2500  1000, AV_TIME_BASE);
2501  if (end < ist->prev_sub.subtitle.end_display_time) {
2502  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2503  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2505  end <= 0 ? ", dropping it" : "");
2507  }
2508  }
2509  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2510  FFSWAP(int, ret, ist->prev_sub.ret);
2511  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2512  if (end <= 0)
2513  goto out;
2514  }
2515 
2516  if (!*got_output)
2517  return ret;
2518 
2519  if (ist->sub2video.frame) {
2520  sub2video_update(ist, INT64_MIN, &subtitle);
2521  } else if (ist->nb_filters) {
2522  if (!ist->sub2video.sub_queue)
2523  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2524  if (!ist->sub2video.sub_queue)
2525  exit_program(1);
2526  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2528  if (ret < 0)
2529  exit_program(1);
2530  }
2531  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2532  free_sub = 0;
2533  }
2534 
2535  if (!subtitle.num_rects)
2536  goto out;
2537 
2538  ist->frames_decoded++;
2539 
2540  for (i = 0; i < nb_output_streams; i++) {
2541  OutputStream *ost = output_streams[i];
2542 
2543  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2544  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2545  continue;
2546 
2547  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2548  }
2549 
2550 out:
2551  if (free_sub)
2552  avsubtitle_free(&subtitle);
2553  return ret;
2554 }
2555 
2557 {
2558  int i, ret;
2559  /* TODO keep pts also in stream time base to avoid converting back */
2560  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2562 
2563  for (i = 0; i < ist->nb_filters; i++) {
2564  ret = ifilter_send_eof(ist->filters[i], pts);
2565  if (ret < 0)
2566  return ret;
2567  }
2568  return 0;
2569 }
2570 
2571 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2572 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2573 {
2574  int ret = 0, i;
2575  int repeating = 0;
2576  int eof_reached = 0;
2577 
2578  AVPacket avpkt;
2579  if (!ist->saw_first_ts) {
2580  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2581  ist->pts = 0;
2582  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2583  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2584  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2585  }
2586  ist->saw_first_ts = 1;
2587  }
2588 
2589  if (ist->next_dts == AV_NOPTS_VALUE)
2590  ist->next_dts = ist->dts;
2591  if (ist->next_pts == AV_NOPTS_VALUE)
2592  ist->next_pts = ist->pts;
2593 
2594  if (!pkt) {
2595  /* EOF handling */
2596  av_init_packet(&avpkt);
2597  avpkt.data = NULL;
2598  avpkt.size = 0;
2599  } else {
2600  avpkt = *pkt;
2601  }
2602 
2603  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2604  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2605  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2606  ist->next_pts = ist->pts = ist->dts;
2607  }
2608 
2609  // while we have more to decode or while the decoder did output something on EOF
2610  while (ist->decoding_needed) {
2611  int64_t duration_dts = 0;
2612  int64_t duration_pts = 0;
2613  int got_output = 0;
2614  int decode_failed = 0;
2615 
2616  ist->pts = ist->next_pts;
2617  ist->dts = ist->next_dts;
2618 
2619  switch (ist->dec_ctx->codec_type) {
2620  case AVMEDIA_TYPE_AUDIO:
2621  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2622  &decode_failed);
2623  break;
2624  case AVMEDIA_TYPE_VIDEO:
2625  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2626  &decode_failed);
2627  if (!repeating || !pkt || got_output) {
2628  if (pkt && pkt->duration) {
2629  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2630  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2632  duration_dts = ((int64_t)AV_TIME_BASE *
2633  ist->dec_ctx->framerate.den * ticks) /
2635  }
2636 
2637  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2638  ist->next_dts += duration_dts;
2639  }else
2640  ist->next_dts = AV_NOPTS_VALUE;
2641  }
2642 
2643  if (got_output) {
2644  if (duration_pts > 0) {
2645  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2646  } else {
2647  ist->next_pts += duration_dts;
2648  }
2649  }
2650  break;
2651  case AVMEDIA_TYPE_SUBTITLE:
2652  if (repeating)
2653  break;
2654  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2655  if (!pkt && ret >= 0)
2656  ret = AVERROR_EOF;
2657  break;
2658  default:
2659  return -1;
2660  }
2661 
2662  if (ret == AVERROR_EOF) {
2663  eof_reached = 1;
2664  break;
2665  }
2666 
2667  if (ret < 0) {
2668  if (decode_failed) {
2669  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2670  ist->file_index, ist->st->index, av_err2str(ret));
2671  } else {
2672  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2673  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2674  }
2675  if (!decode_failed || exit_on_error)
2676  exit_program(1);
2677  break;
2678  }
2679 
2680  if (got_output)
2681  ist->got_output = 1;
2682 
2683  if (!got_output)
2684  break;
2685 
2686  // During draining, we might get multiple output frames in this loop.
2687  // ffmpeg.c does not drain the filter chain on configuration changes,
2688  // which means if we send multiple frames at once to the filters, and
2689  // one of those frames changes configuration, the buffered frames will
2690  // be lost. This can upset certain FATE tests.
2691  // Decode only 1 frame per call on EOF to appease these FATE tests.
2692  // The ideal solution would be to rewrite decoding to use the new
2693  // decoding API in a better way.
2694  if (!pkt)
2695  break;
2696 
2697  repeating = 1;
2698  }
2699 
2700  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2701  /* except when looping we need to flush but not to send an EOF */
2702  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2703  int ret = send_filter_eof(ist);
2704  if (ret < 0) {
2705  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2706  exit_program(1);
2707  }
2708  }
2709 
2710  /* handle stream copy */
2711  if (!ist->decoding_needed && pkt) {
2712  ist->dts = ist->next_dts;
2713  switch (ist->dec_ctx->codec_type) {
2714  case AVMEDIA_TYPE_AUDIO:
2715  av_assert1(pkt->duration >= 0);
2716  if (ist->dec_ctx->sample_rate) {
2717  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2718  ist->dec_ctx->sample_rate;
2719  } else {
2720  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2721  }
2722  break;
2723  case AVMEDIA_TYPE_VIDEO:
2724  if (ist->framerate.num) {
2725  // TODO: Remove work-around for c99-to-c89 issue 7
2726  AVRational time_base_q = AV_TIME_BASE_Q;
2727  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2728  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2729  } else if (pkt->duration) {
2730  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2731  } else if(ist->dec_ctx->framerate.num != 0) {
2732  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2733  ist->next_dts += ((int64_t)AV_TIME_BASE *
2734  ist->dec_ctx->framerate.den * ticks) /
2736  }
2737  break;
2738  }
2739  ist->pts = ist->dts;
2740  ist->next_pts = ist->next_dts;
2741  }
2742  for (i = 0; i < nb_output_streams; i++) {
2743  OutputStream *ost = output_streams[i];
2744 
2745  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2746  continue;
2747 
2748  do_streamcopy(ist, ost, pkt);
2749  }
2750 
2751  return !eof_reached;
2752 }
2753 
2754 static void print_sdp(void)
2755 {
2756  char sdp[16384];
2757  int i;
2758  int j;
2759  AVIOContext *sdp_pb;
2760  AVFormatContext **avc;
2761 
2762  for (i = 0; i < nb_output_files; i++) {
2763  if (!output_files[i]->header_written)
2764  return;
2765  }
2766 
2767  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2768  if (!avc)
2769  exit_program(1);
2770  for (i = 0, j = 0; i < nb_output_files; i++) {
2771  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2772  avc[j] = output_files[i]->ctx;
2773  j++;
2774  }
2775  }
2776 
2777  if (!j)
2778  goto fail;
2779 
2780  av_sdp_create(avc, j, sdp, sizeof(sdp));
2781 
2782  if (!sdp_filename) {
2783  printf("SDP:\n%s\n", sdp);
2784  fflush(stdout);
2785  } else {
2786  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2787  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2788  } else {
2789  avio_print(sdp_pb, sdp);
2790  avio_closep(&sdp_pb);
2792  }
2793  }
2794 
2795 fail:
2796  av_freep(&avc);
2797 }
2798 
2800 {
2801  InputStream *ist = s->opaque;
2802  const enum AVPixelFormat *p;
2803  int ret;
2804 
2805  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2807  const AVCodecHWConfig *config = NULL;
2808  int i;
2809 
2810  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2811  break;
2812 
2813  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2814  ist->hwaccel_id == HWACCEL_AUTO) {
2815  for (i = 0;; i++) {
2816  config = avcodec_get_hw_config(s->codec, i);
2817  if (!config)
2818  break;
2819  if (!(config->methods &
2821  continue;
2822  if (config->pix_fmt == *p)
2823  break;
2824  }
2825  }
2826  if (config) {
2827  if (config->device_type != ist->hwaccel_device_type) {
2828  // Different hwaccel offered, ignore.
2829  continue;
2830  }
2831 
2832  ret = hwaccel_decode_init(s);
2833  if (ret < 0) {
2834  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2836  "%s hwaccel requested for input stream #%d:%d, "
2837  "but cannot be initialized.\n",
2839  ist->file_index, ist->st->index);
2840  return AV_PIX_FMT_NONE;
2841  }
2842  continue;
2843  }
2844  } else {
2845  const HWAccel *hwaccel = NULL;
2846  int i;
2847  for (i = 0; hwaccels[i].name; i++) {
2848  if (hwaccels[i].pix_fmt == *p) {
2849  hwaccel = &hwaccels[i];
2850  break;
2851  }
2852  }
2853  if (!hwaccel) {
2854  // No hwaccel supporting this pixfmt.
2855  continue;
2856  }
2857  if (hwaccel->id != ist->hwaccel_id) {
2858  // Does not match requested hwaccel.
2859  continue;
2860  }
2861 
2862  ret = hwaccel->init(s);
2863  if (ret < 0) {
2865  "%s hwaccel requested for input stream #%d:%d, "
2866  "but cannot be initialized.\n", hwaccel->name,
2867  ist->file_index, ist->st->index);
2868  return AV_PIX_FMT_NONE;
2869  }
2870  }
2871 
2872  if (ist->hw_frames_ctx) {
2874  if (!s->hw_frames_ctx)
2875  return AV_PIX_FMT_NONE;
2876  }
2877 
2878  ist->hwaccel_pix_fmt = *p;
2879  break;
2880  }
2881 
2882  return *p;
2883 }
2884 
2886 {
2887  InputStream *ist = s->opaque;
2888 
2889  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2890  return ist->hwaccel_get_buffer(s, frame, flags);
2891 
2892  return avcodec_default_get_buffer2(s, frame, flags);
2893 }
2894 
2895 static int init_input_stream(int ist_index, char *error, int error_len)
2896 {
2897  int ret;
2898  InputStream *ist = input_streams[ist_index];
2899 
2900  if (ist->decoding_needed) {
2901  AVCodec *codec = ist->dec;
2902  if (!codec) {
2903  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2904  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2905  return AVERROR(EINVAL);
2906  }
2907 
2908  ist->dec_ctx->opaque = ist;
2909  ist->dec_ctx->get_format = get_format;
2910  ist->dec_ctx->get_buffer2 = get_buffer;
2911  ist->dec_ctx->thread_safe_callbacks = 1;
2912 
2913  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2914  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2915  (ist->decoding_needed & DECODING_FOR_OST)) {
2916  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2918  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2919  }
2920 
2921  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2922 
2923  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2924  * audio, and video decoders such as cuvid or mediacodec */
2925  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2926 
2927  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2928  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2929  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2931  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2932 
2933  ret = hw_device_setup_for_decode(ist);
2934  if (ret < 0) {
2935  snprintf(error, error_len, "Device setup failed for "
2936  "decoder on input stream #%d:%d : %s",
2937  ist->file_index, ist->st->index, av_err2str(ret));
2938  return ret;
2939  }
2940 
2941  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2942  if (ret == AVERROR_EXPERIMENTAL)
2943  abort_codec_experimental(codec, 0);
2944 
2945  snprintf(error, error_len,
2946  "Error while opening decoder for input stream "
2947  "#%d:%d : %s",
2948  ist->file_index, ist->st->index, av_err2str(ret));
2949  return ret;
2950  }
2952  }
2953 
2954  ist->next_pts = AV_NOPTS_VALUE;
2955  ist->next_dts = AV_NOPTS_VALUE;
2956 
2957  return 0;
2958 }
2959 
2961 {
2962  if (ost->source_index >= 0)
2963  return input_streams[ost->source_index];
2964  return NULL;
2965 }
2966 
/* qsort()-style comparator for int64_t values: returns -1, 0 or 1. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2971 
2972 /* open the muxer when all the streams are initialized */
2973 static int check_init_output_file(OutputFile *of, int file_index)
2974 {
2975  int ret, i;
2976 
2977  for (i = 0; i < of->ctx->nb_streams; i++) {
2978  OutputStream *ost = output_streams[of->ost_index + i];
2979  if (!ost->initialized)
2980  return 0;
2981  }
2982 
2983  of->ctx->interrupt_callback = int_cb;
2984 
2985  ret = avformat_write_header(of->ctx, &of->opts);
2986  if (ret < 0) {
2988  "Could not write header for output file #%d "
2989  "(incorrect codec parameters ?): %s\n",
2990  file_index, av_err2str(ret));
2991  return ret;
2992  }
2993  //assert_avoptions(of->opts);
2994  of->header_written = 1;
2995 
2996  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2997 
2998  if (sdp_filename || want_sdp)
2999  print_sdp();
3000 
3001  /* flush the muxing queues */
3002  for (i = 0; i < of->ctx->nb_streams; i++) {
3003  OutputStream *ost = output_streams[of->ost_index + i];
3004 
3005  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3006  if (!av_fifo_size(ost->muxing_queue))
3007  ost->mux_timebase = ost->st->time_base;
3008 
3009  while (av_fifo_size(ost->muxing_queue)) {
3010  AVPacket pkt;
3011  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3012  write_packet(of, &pkt, ost, 1);
3013  }
3014  }
3015 
3016  return 0;
3017 }
3018 
3020 {
3021  AVBSFContext *ctx;
3022  int i, ret;
3023 
3024  if (!ost->nb_bitstream_filters)
3025  return 0;
3026 
3027  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3028  ctx = ost->bsf_ctx[i];
3029 
3030  ret = avcodec_parameters_copy(ctx->par_in,
3031  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3032  if (ret < 0)
3033  return ret;
3034 
3035  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3036 
3037  ret = av_bsf_init(ctx);
3038  if (ret < 0) {
3039  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3040  ost->bsf_ctx[i]->filter->name);
3041  return ret;
3042  }
3043  }
3044 
3045  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3046  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3047  if (ret < 0)
3048  return ret;
3049 
3050  ost->st->time_base = ctx->time_base_out;
3051 
3052  return 0;
3053 }
3054 
3056 {
3057  OutputFile *of = output_files[ost->file_index];
3058  InputStream *ist = get_input_stream(ost);
3059  AVCodecParameters *par_dst = ost->st->codecpar;
3060  AVCodecParameters *par_src = ost->ref_par;
3061  AVRational sar;
3062  int i, ret;
3063  uint32_t codec_tag = par_dst->codec_tag;
3064 
3065  av_assert0(ist && !ost->filter);
3066 
3067  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3068  if (ret >= 0)
3069  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3070  if (ret < 0) {
3072  "Error setting up codec context options.\n");
3073  return ret;
3074  }
3075 
3076  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3077  if (ret < 0) {
3079  "Error getting reference codec parameters.\n");
3080  return ret;
3081  }
3082 
3083  if (!codec_tag) {
3084  unsigned int codec_tag_tmp;
3085  if (!of->ctx->oformat->codec_tag ||
3086  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3087  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3088  codec_tag = par_src->codec_tag;
3089  }
3090 
3091  ret = avcodec_parameters_copy(par_dst, par_src);
3092  if (ret < 0)
3093  return ret;
3094 
3095  par_dst->codec_tag = codec_tag;
3096 
3097  if (!ost->frame_rate.num)
3098  ost->frame_rate = ist->framerate;
3099  ost->st->avg_frame_rate = ost->frame_rate;
3100 
3102  if (ret < 0)
3103  return ret;
3104 
3105  // copy timebase while removing common factors
3106  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3108 
3109  // copy estimated duration as a hint to the muxer
3110  if (ost->st->duration <= 0 && ist->st->duration > 0)
3111  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3112 
3113  // copy disposition
3114  ost->st->disposition = ist->st->disposition;
3115 
3116  if (ist->st->nb_side_data) {
3117  for (i = 0; i < ist->st->nb_side_data; i++) {
3118  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3119  uint8_t *dst_data;
3120 
3121  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3122  if (!dst_data)
3123  return AVERROR(ENOMEM);
3124  memcpy(dst_data, sd_src->data, sd_src->size);
3125  }
3126  }
3127 
3128  if (ost->rotate_overridden) {
3130  sizeof(int32_t) * 9);
3131  if (sd)
3133  }
3134 
3135  switch (par_dst->codec_type) {
3136  case AVMEDIA_TYPE_AUDIO:
3137  if (audio_volume != 256) {
3138  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3139  exit_program(1);
3140  }
3141  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3142  par_dst->block_align= 0;
3143  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3144  par_dst->block_align= 0;
3145  break;
3146  case AVMEDIA_TYPE_VIDEO:
3147  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3148  sar =
3150  (AVRational){ par_dst->height, par_dst->width });
3151  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3152  "with stream copy may produce invalid files\n");
3153  }
3154  else if (ist->st->sample_aspect_ratio.num)
3155  sar = ist->st->sample_aspect_ratio;
3156  else
3157  sar = par_src->sample_aspect_ratio;
3158  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3159  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3160  ost->st->r_frame_rate = ist->st->r_frame_rate;
3161  break;
3162  }
3163 
3164  ost->mux_timebase = ist->st->time_base;
3165 
3166  return 0;
3167 }
3168 
3170 {
3171  AVDictionaryEntry *e;
3172 
3173  uint8_t *encoder_string;
3174  int encoder_string_len;
3175  int format_flags = 0;
3176  int codec_flags = ost->enc_ctx->flags;
3177 
3178  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3179  return;
3180 
3181  e = av_dict_get(of->opts, "fflags", NULL, 0);
3182  if (e) {
3183  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3184  if (!o)
3185  return;
3186  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3187  }
3188  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3189  if (e) {
3190  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3191  if (!o)
3192  return;
3193  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3194  }
3195 
3196  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3197  encoder_string = av_mallocz(encoder_string_len);
3198  if (!encoder_string)
3199  exit_program(1);
3200 
3201  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3202  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3203  else
3204  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3205  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3206  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3208 }
3209 
3210 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3211  AVCodecContext *avctx)
3212 {
3213  char *p;
3214  int n = 1, i, size, index = 0;
3215  int64_t t, *pts;
3216 
3217  for (p = kf; *p; p++)
3218  if (*p == ',')
3219  n++;
3220  size = n;
3221  pts = av_malloc_array(size, sizeof(*pts));
3222  if (!pts) {
3223  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3224  exit_program(1);
3225  }
3226 
3227  p = kf;
3228  for (i = 0; i < n; i++) {
3229  char *next = strchr(p, ',');
3230 
3231  if (next)
3232  *next++ = 0;
3233 
3234  if (!memcmp(p, "chapters", 8)) {
3235 
3236  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3237  int j;
3238 
3239  if (avf->nb_chapters > INT_MAX - size ||
3240  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3241  sizeof(*pts)))) {
3243  "Could not allocate forced key frames array.\n");
3244  exit_program(1);
3245  }
3246  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3247  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3248 
3249  for (j = 0; j < avf->nb_chapters; j++) {
3250  AVChapter *c = avf->chapters[j];
3251  av_assert1(index < size);
3252  pts[index++] = av_rescale_q(c->start, c->time_base,
3253  avctx->time_base) + t;
3254  }
3255 
3256  } else {
3257 
3258  t = parse_time_or_die("force_key_frames", p, 1);
3259  av_assert1(index < size);
3260  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3261 
3262  }
3263 
3264  p = next;
3265  }
3266 
3267  av_assert0(index == size);
3268  qsort(pts, size, sizeof(*pts), compare_int64);
3269  ost->forced_kf_count = size;
3270  ost->forced_kf_pts = pts;
3271 }
3272 
3273 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3274 {
3275  InputStream *ist = get_input_stream(ost);
3276  AVCodecContext *enc_ctx = ost->enc_ctx;
3277  AVFormatContext *oc;
3278 
3279  if (ost->enc_timebase.num > 0) {
3280  enc_ctx->time_base = ost->enc_timebase;
3281  return;
3282  }
3283 
3284  if (ost->enc_timebase.num < 0) {
3285  if (ist) {
3286  enc_ctx->time_base = ist->st->time_base;
3287  return;
3288  }
3289 
3290  oc = output_files[ost->file_index]->ctx;
3291  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3292  }
3293 
3294  enc_ctx->time_base = default_time_base;
3295 }
3296 
/* NOTE(review): the signature line was dropped by the doc extraction; in
 * context this is init_output_stream_encode(OutputStream *ost), the
 * encoder-path stream initializer.  Several interior statements were also
 * dropped (marked NOTE(review) below); surviving code is left untouched.
 *
 * Configures the encoder context — frame rate, geometry, sample parameters,
 * time base, field order, forced keyframes — from the input stream and/or
 * the filtergraph sink feeding this output stream.  Returns 0 on success or
 * a negative AVERROR. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* NOTE(review): the declaration of dec_ctx (the source decoder context,
     * presumably NULL when there is no input stream) was dropped here. */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* Carry the input disposition and chroma siting over verbatim. */
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No input stream: check whether a sibling stream of the same media
         * type exists in this output file. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        /* NOTE(review): the rest of this condition and its body (apparently
         * assigning a default disposition) were dropped by the extraction. */
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate selection cascade; later fallbacks only fire when the
         * earlier ones left frame_rate unset.
         * NOTE(review): the statement under the first 'if' (presumably
         * querying the filter sink) was dropped by the extraction. */
        if (!ost->frame_rate.num)
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            /* Last resort: hard-coded 25 fps default, with a warning. */
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log(...) call head for this warning was
             * dropped; only its argument lines survive below. */
            "No information "
            "about the input framerate is available. Falling "
            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
            "if you want a different framerate.\n",
            ost->file_index, ost->index);
        }

        /* Snap to the nearest rate the encoder declares it supports, unless
         * -force_fps was given. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): the assignments pulling sample format / rate /
         * channel layout from the filter sink were dropped around here. */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): the video time-base setup preceding this check and
         * part of the condition below were dropped by the extraction. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        /* Geometry, aspect ratio and pixel format come from the sink. */
        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* NOTE(review): the fallback operand of this ternary (presumably
             * the filter sink's sample aspect ratio) was dropped here. */

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        /* NOTE(review): this block's body (taken when the encoder geometry or
         * pixel format differs from the decoder's) was dropped here. */
        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
        }

        /* -top 0/1 forces bottom/top field first; other values leave the
         * field order untouched. */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the expression-parsing call whose result is
                 * checked here, and the av_log() head of the error message,
                 * were dropped by the extraction. */
                if (ret < 0) {
                        "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): the static-timing parse call (presumably
                 * parse_forced_key_frames) was dropped here. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Bitmap subtitles default to the input frame geometry. */
        if (!enc_ctx->width) {
            enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    /* Muxing uses the (possibly just-derived) encoder time base. */
    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3450 
/* Fully initialize one output stream: open the encoder (encoding path) or
 * copy parameters (streamcopy path), apply user -disposition flags, set up
 * bitstream filters, and attempt to write the output file header once all of
 * that file's streams are ready.  On failure, writes a human-readable
 * message into error/error_len and returns a negative AVERROR.
 * NOTE(review): several interior lines were dropped by the doc extraction
 * (marked NOTE(review) below); surviving code is left untouched. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the subtitle_header allocation statement was
             * dropped by the extraction here. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): the subtitle_header_size assignment was dropped
             * by the extraction here. */
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Audio encoders with no internal defaults get 128 kb/s unless the
         * user set a bitrate. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* NOTE(review): the tail of this condition and the hw_frames_ctx
         * referencing statements were dropped by the extraction. */
        if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }
        /* Subtitle transcoding is only possible within one representation
         * family (text->text or bitmap->bitmap). */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            /* NOTE(review): the avcodec_descriptor_get() initializers of both
             * descriptor pointers were dropped by the extraction. */
            AVCodecDescriptor const *input_descriptor =
            AVCodecDescriptor const *output_descriptor =
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                    ost->file_index, ost->index);
            return ret;
        }
        /* NOTE(review): the call head this argument belongs to (presumably a
         * buffersink frame-size setter) was dropped by the extraction. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): the call producing this ret (copying the codec
         * context into the stream parameters) and the av_log() head of the
         * fatal message were dropped by the extraction. */
        if (ret < 0) {
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* Mirror encoder-produced global side data onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* Rotation was applied by the filter chain, so neutralize
                     * the copied display matrix. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        /* Local option table so av_opt_eval_flags() can parse the textual
         * -disposition value into AV_DISPOSITION_* bits. */
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* Write the file header once every stream of this file is initialized. */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3647 
3648 static void report_new_stream(int input_index, AVPacket *pkt)
3649 {
3650  InputFile *file = input_files[input_index];
3651  AVStream *st = file->ctx->streams[pkt->stream_index];
3652 
3653  if (pkt->stream_index < file->nb_streams_warn)
3654  return;
3655  av_log(file->ctx, AV_LOG_WARNING,
3656  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3658  input_index, pkt->stream_index,
3659  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3660  file->nb_streams_warn = pkt->stream_index + 1;
3661 }
3662 
3663 static int transcode_init(void)
3664 {
3665  int ret = 0, i, j, k;
3666  AVFormatContext *oc;
3667  OutputStream *ost;
3668  InputStream *ist;
3669  char error[1024] = {0};
3670 
3671  for (i = 0; i < nb_filtergraphs; i++) {
3672  FilterGraph *fg = filtergraphs[i];
3673  for (j = 0; j < fg->nb_outputs; j++) {
3674  OutputFilter *ofilter = fg->outputs[j];
3675  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3676  continue;
3677  if (fg->nb_inputs != 1)
3678  continue;
3679  for (k = nb_input_streams-1; k >= 0 ; k--)
3680  if (fg->inputs[0]->ist == input_streams[k])
3681  break;
3682  ofilter->ost->source_index = k;
3683  }
3684  }
3685 
3686  /* init framerate emulation */
3687  for (i = 0; i < nb_input_files; i++) {
3688  InputFile *ifile = input_files[i];
3689  if (ifile->rate_emu)
3690  for (j = 0; j < ifile->nb_streams; j++)
3691  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3692  }
3693 
3694  /* init input streams */
3695  for (i = 0; i < nb_input_streams; i++)
3696  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3697  for (i = 0; i < nb_output_streams; i++) {
3698  ost = output_streams[i];
3699  avcodec_close(ost->enc_ctx);
3700  }
3701  goto dump_format;
3702  }
3703 
3704  /* open each encoder */
3705  for (i = 0; i < nb_output_streams; i++) {
3706  // skip streams fed from filtergraphs until we have a frame for them
3707  if (output_streams[i]->filter)
3708  continue;
3709 
3710  ret = init_output_stream(output_streams[i], error, sizeof(error));
3711  if (ret < 0)
3712  goto dump_format;
3713  }
3714 
3715  /* discard unused programs */
3716  for (i = 0; i < nb_input_files; i++) {
3717  InputFile *ifile = input_files[i];
3718  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3719  AVProgram *p = ifile->ctx->programs[j];
3720  int discard = AVDISCARD_ALL;
3721 
3722  for (k = 0; k < p->nb_stream_indexes; k++)
3723  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3724  discard = AVDISCARD_DEFAULT;
3725  break;
3726  }
3727  p->discard = discard;
3728  }
3729  }
3730 
3731  /* write headers for files with no streams */
3732  for (i = 0; i < nb_output_files; i++) {
3733  oc = output_files[i]->ctx;
3734  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3735  ret = check_init_output_file(output_files[i], i);
3736  if (ret < 0)
3737  goto dump_format;
3738  }
3739  }
3740 
3741  dump_format:
3742  /* dump the stream mapping */
3743  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3744  for (i = 0; i < nb_input_streams; i++) {
3745  ist = input_streams[i];
3746 
3747  for (j = 0; j < ist->nb_filters; j++) {
3748  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3749  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3750  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3751  ist->filters[j]->name);
3752  if (nb_filtergraphs > 1)
3753  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3754  av_log(NULL, AV_LOG_INFO, "\n");
3755  }
3756  }
3757  }
3758 
3759  for (i = 0; i < nb_output_streams; i++) {
3760  ost = output_streams[i];
3761 
3762  if (ost->attachment_filename) {
3763  /* an attached file */
3764  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3765  ost->attachment_filename, ost->file_index, ost->index);
3766  continue;
3767  }
3768 
3769  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3770  /* output from a complex graph */
3771  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3772  if (nb_filtergraphs > 1)
3773  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3774 
3775  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3776  ost->index, ost->enc ? ost->enc->name : "?");
3777  continue;
3778  }
3779 
3780  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3781  input_streams[ost->source_index]->file_index,
3782  input_streams[ost->source_index]->st->index,
3783  ost->file_index,
3784  ost->index);
3785  if (ost->sync_ist != input_streams[ost->source_index])
3786  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3787  ost->sync_ist->file_index,
3788  ost->sync_ist->st->index);
3789  if (ost->stream_copy)
3790  av_log(NULL, AV_LOG_INFO, " (copy)");
3791  else {
3792  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3793  const AVCodec *out_codec = ost->enc;
3794  const char *decoder_name = "?";
3795  const char *in_codec_name = "?";
3796  const char *encoder_name = "?";
3797  const char *out_codec_name = "?";
3798  const AVCodecDescriptor *desc;
3799 
3800  if (in_codec) {
3801  decoder_name = in_codec->name;
3802  desc = avcodec_descriptor_get(in_codec->id);
3803  if (desc)
3804  in_codec_name = desc->name;
3805  if (!strcmp(decoder_name, in_codec_name))
3806  decoder_name = "native";
3807  }
3808 
3809  if (out_codec) {
3810  encoder_name = out_codec->name;
3811  desc = avcodec_descriptor_get(out_codec->id);
3812  if (desc)
3813  out_codec_name = desc->name;
3814  if (!strcmp(encoder_name, out_codec_name))
3815  encoder_name = "native";
3816  }
3817 
3818  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3819  in_codec_name, decoder_name,
3820  out_codec_name, encoder_name);
3821  }
3822  av_log(NULL, AV_LOG_INFO, "\n");
3823  }
3824 
3825  if (ret) {
3826  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3827  return ret;
3828  }
3829 
3831 
3832  return 0;
3833 }
3834 
3835 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3836 static int need_output(void)
3837 {
3838  int i;
3839 
3840  for (i = 0; i < nb_output_streams; i++) {
3841  OutputStream *ost = output_streams[i];
3842  OutputFile *of = output_files[ost->file_index];
3843  AVFormatContext *os = output_files[ost->file_index]->ctx;
3844 
3845  if (ost->finished ||
3846  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3847  continue;
3848  if (ost->frame_number >= ost->max_frames) {
3849  int j;
3850  for (j = 0; j < of->ctx->nb_streams; j++)
3851  close_output_stream(output_streams[of->ost_index + j]);
3852  continue;
3853  }
3854 
3855  return 1;
3856  }
3857 
3858  return 0;
3859 }
3860 
3861 /**
3862  * Select the output stream to process.
3863  *
3864  * @return selected output stream, or NULL if none available
3865  */
3867 {
3868  int i;
3869  int64_t opts_min = INT64_MAX;
3870  OutputStream *ost_min = NULL;
3871 
3872  for (i = 0; i < nb_output_streams; i++) {
3873  OutputStream *ost = output_streams[i];
3874  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3875  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3876  AV_TIME_BASE_Q);
3877  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3879  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3880  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3881 
3882  if (!ost->initialized && !ost->inputs_done)
3883  return ost;
3884 
3885  if (!ost->finished && opts < opts_min) {
3886  opts_min = opts;
3887  ost_min = ost->unavailable ? NULL : ost;
3888  }
3889  }
3890  return ost_min;
3891 }
3892 
/* Enable (on != 0) or disable terminal echo on stdin.  Best effort: does
 * nothing when termios is unavailable or stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &attr);
#endif
}
3904 
3905 static int check_keyboard_interaction(int64_t cur_time)
3906 {
3907  int i, ret, key;
3908  static int64_t last_time;
3909  if (received_nb_signals)
3910  return AVERROR_EXIT;
3911  /* read_key() returns 0 on EOF */
3912  if(cur_time - last_time >= 100000 && !run_as_daemon){
3913  key = read_key();
3914  last_time = cur_time;
3915  }else
3916  key = -1;
3917  if (key == 'q')
3918  return AVERROR_EXIT;
3919  if (key == '+') av_log_set_level(av_log_get_level()+10);
3920  if (key == '-') av_log_set_level(av_log_get_level()-10);
3921  if (key == 's') qp_hist ^= 1;
3922  if (key == 'h'){
3923  if (do_hex_dump){
3924  do_hex_dump = do_pkt_dump = 0;
3925  } else if(do_pkt_dump){
3926  do_hex_dump = 1;
3927  } else
3928  do_pkt_dump = 1;
3930  }
3931  if (key == 'c' || key == 'C'){
3932  char buf[4096], target[64], command[256], arg[256] = {0};
3933  double time;
3934  int k, n = 0;
3935  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3936  i = 0;
3937  set_tty_echo(1);
3938  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3939  if (k > 0)
3940  buf[i++] = k;
3941  buf[i] = 0;
3942  set_tty_echo(0);
3943  fprintf(stderr, "\n");
3944  if (k > 0 &&
3945  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3946  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3947  target, time, command, arg);
3948  for (i = 0; i < nb_filtergraphs; i++) {
3949  FilterGraph *fg = filtergraphs[i];
3950  if (fg->graph) {
3951  if (time < 0) {
3952  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3953  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3954  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3955  } else if (key == 'c') {
3956  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3957  ret = AVERROR_PATCHWELCOME;
3958  } else {
3959  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3960  if (ret < 0)
3961  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3962  }
3963  }
3964  }
3965  } else {
3967  "Parse error, at least 3 arguments were expected, "
3968  "only %d given in string '%s'\n", n, buf);
3969  }
3970  }
3971  if (key == 'd' || key == 'D'){
3972  int debug=0;
3973  if(key == 'D') {
3974  debug = input_streams[0]->st->codec->debug<<1;
3975  if(!debug) debug = 1;
3976  while(debug & (FF_DEBUG_DCT_COEFF
3977 #if FF_API_DEBUG_MV
3978  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3979 #endif
3980  )) //unsupported, would just crash
3981  debug += debug;
3982  }else{
3983  char buf[32];
3984  int k = 0;
3985  i = 0;
3986  set_tty_echo(1);
3987  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3988  if (k > 0)
3989  buf[i++] = k;
3990  buf[i] = 0;
3991  set_tty_echo(0);
3992  fprintf(stderr, "\n");
3993  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3994  fprintf(stderr,"error parsing debug value\n");
3995  }
3996  for(i=0;i<nb_input_streams;i++) {
3997  input_streams[i]->st->codec->debug = debug;
3998  }
3999  for(i=0;i<nb_output_streams;i++) {
4000  OutputStream *ost = output_streams[i];
4001  ost->enc_ctx->debug = debug;
4002  }
4003  if(debug) av_log_set_level(AV_LOG_DEBUG);
4004  fprintf(stderr,"debug=%d\n", debug);
4005  }
4006  if (key == '?'){
4007  fprintf(stderr, "key function\n"
4008  "? show this help\n"
4009  "+ increase verbosity\n"
4010  "- decrease verbosity\n"
4011  "c Send command to first matching filter supporting it\n"
4012  "C Send/Queue command to all matching filters\n"
4013  "D cycle through available debug modes\n"
4014  "h dump packets/hex press to cycle through the 3 states\n"
4015  "q quit\n"
4016  "s Show QP histogram\n"
4017  );
4018  }
4019  return 0;
4020 }
4021 
4022 #if HAVE_THREADS
4023 static void *input_thread(void *arg)
4024 {
4025  InputFile *f = arg;
4026  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4027  int ret = 0;
4028 
4029  while (1) {
4030  AVPacket pkt;
4031  ret = av_read_frame(f->ctx, &pkt);
4032 
4033  if (ret == AVERROR(EAGAIN)) {
4034  av_usleep(10000);
4035  continue;
4036  }
4037  if (ret < 0) {
4038  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4039  break;
4040  }
4041  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4042  if (flags && ret == AVERROR(EAGAIN)) {
4043  flags = 0;
4044  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4046  "Thread message queue blocking; consider raising the "
4047  "thread_queue_size option (current value: %d)\n",
4048  f->thread_queue_size);
4049  }
4050  if (ret < 0) {
4051  if (ret != AVERROR_EOF)
4052  av_log(f->ctx, AV_LOG_ERROR,
4053  "Unable to send packet to main thread: %s\n",
4054  av_err2str(ret));
4055  av_packet_unref(&pkt);
4056  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4057  break;
4058  }
4059  }
4060 
4061  return NULL;
4062 }
4063 
4064 static void free_input_thread(int i)
4065 {
4066  InputFile *f = input_files[i];
4067  AVPacket pkt;
4068 
4069  if (!f || !f->in_thread_queue)
4070  return;
4072  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4073  av_packet_unref(&pkt);
4074 
4075  pthread_join(f->thread, NULL);
4076  f->joined = 1;
4077  av_thread_message_queue_free(&f->in_thread_queue);
4078 }
4079 
4080 static void free_input_threads(void)
4081 {
4082  int i;
4083 
4084  for (i = 0; i < nb_input_files; i++)
4085  free_input_thread(i);
4086 }
4087 
4088 static int init_input_thread(int i)
4089 {
4090  int ret;
4091  InputFile *f = input_files[i];
4092 
4093  if (nb_input_files == 1)
4094  return 0;
4095 
4096  if (f->ctx->pb ? !f->ctx->pb->seekable :
4097  strcmp(f->ctx->iformat->name, "lavfi"))
4098  f->non_blocking = 1;
4099  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4100  f->thread_queue_size, sizeof(AVPacket));
4101  if (ret < 0)
4102  return ret;
4103 
4104  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4105  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4106  av_thread_message_queue_free(&f->in_thread_queue);
4107  return AVERROR(ret);
4108  }
4109 
4110  return 0;
4111 }
4112 
4113 static int init_input_threads(void)
4114 {
4115  int i, ret;
4116 
4117  for (i = 0; i < nb_input_files; i++) {
4118  ret = init_input_thread(i);
4119  if (ret < 0)
4120  return ret;
4121  }
4122  return 0;
4123 }
4124 
4125 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4126 {
4127  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4128  f->non_blocking ?
4130 }
4131 #endif
4132 
4134 {
4135  if (f->rate_emu) {
4136  int i;
4137  for (i = 0; i < f->nb_streams; i++) {
4138  InputStream *ist = input_streams[f->ist_index + i];
4139  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4140  int64_t now = av_gettime_relative() - ist->start;
4141  if (pts > now)
4142  return AVERROR(EAGAIN);
4143  }
4144  }
4145 
4146 #if HAVE_THREADS
4147  if (nb_input_files > 1)
4148  return get_input_packet_mt(f, pkt);
4149 #endif
4150  return av_read_frame(f->ctx, pkt);
4151 }
4152 
4153 static int got_eagain(void)
4154 {
4155  int i;
4156  for (i = 0; i < nb_output_streams; i++)
4157  if (output_streams[i]->unavailable)
4158  return 1;
4159  return 0;
4160 }
4161 
4162 static void reset_eagain(void)
4163 {
4164  int i;
4165  for (i = 0; i < nb_input_files; i++)
4166  input_files[i]->eagain = 0;
4167  for (i = 0; i < nb_output_streams; i++)
4168  output_streams[i]->unavailable = 0;
4169 }
4170 
4171 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4172 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4173  AVRational time_base)
4174 {
4175  int ret;
4176 
4177  if (!*duration) {
4178  *duration = tmp;
4179  return tmp_time_base;
4180  }
4181 
4182  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4183  if (ret < 0) {
4184  *duration = tmp;
4185  return tmp_time_base;
4186  }
4187 
4188  return time_base;
4189 }
4190 
/* Rewind an input file for -stream_loop: seek back to its start time and
 * grow ifile->duration by the length of the iteration just finished, so the
 * next iteration's timestamps can be shifted past it.
 * NOTE(review): the signature line (original 4191) is missing from this
 * extracted listing; presumably
 *     static int seek_to_start(InputFile *ifile, AVFormatContext *is)
 * — confirm against upstream ffmpeg.c. */
 4192 {
 4193  InputStream *ist;
 4194  AVCodecContext *avctx;
 4195  int i, ret, has_audio = 0;
 4196  int64_t duration = 0;
 4197 
 4198  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
 4199  if (ret < 0)
 4200  return ret;
 4201 
 /* First pass: detect whether any audio stream actually decoded samples;
  * if so, audio defines the last-frame duration below. */
 4202  for (i = 0; i < ifile->nb_streams; i++) {
 4203  ist = input_streams[ifile->ist_index + i];
 4204  avctx = ist->dec_ctx;
 4205 
 4206  /* duration is the length of the last frame in a stream
 4207  * when audio stream is present we don't care about
 4208  * last video frame length because it's not defined exactly */
 4209  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
 4210  has_audio = 1;
 4211  }
 4212 
 /* Second pass: compute each stream's total duration (pts span plus one
  * last-frame duration) and keep the maximum across streams. */
 4213  for (i = 0; i < ifile->nb_streams; i++) {
 4214  ist = input_streams[ifile->ist_index + i];
 4215  avctx = ist->dec_ctx;
 4216 
 4217  if (has_audio) {
 4218  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
 4219  AVRational sample_rate = {1, avctx->sample_rate};
 4220 
 4221  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
 4222  } else {
 4223  continue;
 4224  }
 4225  } else {
 /* No usable audio: estimate the last frame length from the forced or
  * average frame rate, falling back to 1 time-base unit. */
 4226  if (ist->framerate.num) {
 4227  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
 4228  } else if (ist->st->avg_frame_rate.num) {
 4229  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
 4230  } else {
 4231  duration = 1;
 4232  }
 4233  }
 4234  if (!ifile->duration)
 4235  ifile->time_base = ist->st->time_base;
 4236  /* the total duration of the stream, max_pts - min_pts is
 4237  * the duration of the stream without the last frame */
 /* guard against int64 overflow before adding the pts span */
 4238  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
 4239  duration += ist->max_pts - ist->min_pts;
 4240  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
 4241  ifile->time_base);
 4242  }
 4243 
 /* -stream_loop N counts down; negative values mean loop forever. */
 4244  if (ifile->loop > 0)
 4245  ifile->loop--;
 4246 
 4247  return ret;
 4248 }
4249 
4250 /*
4251  * Return
4252  * - 0 -- one packet was read and processed
4253  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4254  * this function should be called again
4255  * - AVERROR_EOF -- this function should not be called again
4256  */
/*
 * Demux one packet from input file #file_index and hand it to
 * process_input_packet() (decode or streamcopy).  Also implements
 * -stream_loop rewinds, EOF flushing, timestamp wrap and discontinuity
 * correction, and -debug_ts logging.
 *
 * NOTE(review): this listing comes from an extraction that dropped several
 * original source lines (4260, 4337, 4356, 4365-4367, 4439, 4441, 4448,
 * 4467, 4473, 4479, 4488, 4492, 4521), so some statements below appear
 * truncated — compare against upstream ffmpeg.c before editing.
 */
 4257 static int process_input(int file_index)
 4258 {
 4259  InputFile *ifile = input_files[file_index];
 /* NOTE(review): original line 4260 (presumably `AVFormatContext *is;`)
  * is missing from this listing; `is` is assigned below. */
 4261  InputStream *ist;
 4262  AVPacket pkt;
 4263  int ret, thread_ret, i, j;
 4264  int64_t duration;
 4265  int64_t pkt_dts;
 4266  int disable_discontinuity_correction = copy_ts;
 4267 
 4268  is = ifile->ctx;
 4269  ret = get_input_packet(ifile, &pkt);
 4270 
 /* No packet available right now: remember the EAGAIN so the scheduler can
  * try another file and revisit this one later. */
 4271  if (ret == AVERROR(EAGAIN)) {
 4272  ifile->eagain = 1;
 4273  return ret;
 4274  }
 /* EOF/error while -stream_loop is active: drain the decoders, rewind the
  * file, restart its demuxer thread, and read again. */
 4275  if (ret < 0 && ifile->loop) {
 4276  AVCodecContext *avctx;
 4277  for (i = 0; i < ifile->nb_streams; i++) {
 4278  ist = input_streams[ifile->ist_index + i];
 4279  avctx = ist->dec_ctx;
 4280  if (ist->decoding_needed) {
 4281  ret = process_input_packet(ist, NULL, 1);
 4282  if (ret>0)
 4283  return 0;
 4284  avcodec_flush_buffers(avctx);
 4285  }
 4286  }
 4287 #if HAVE_THREADS
 4288  free_input_thread(file_index);
 4289 #endif
 4290  ret = seek_to_start(ifile, is);
 4291 #if HAVE_THREADS
 4292  thread_ret = init_input_thread(file_index);
 4293  if (thread_ret < 0)
 4294  return thread_ret;
 4295 #endif
 4296  if (ret < 0)
 4297  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
 4298  else
 4299  ret = get_input_packet(ifile, &pkt);
 4300  if (ret == AVERROR(EAGAIN)) {
 4301  ifile->eagain = 1;
 4302  return ret;
 4303  }
 4304  }
 /* Unrecoverable error or final EOF: flush decoders once more, finish the
  * outputs fed directly by this file, and mark the file exhausted. */
 4305  if (ret < 0) {
 4306  if (ret != AVERROR_EOF) {
 4307  print_error(is->url, ret);
 4308  if (exit_on_error)
 4309  exit_program(1);
 4310  }
 4311 
 4312  for (i = 0; i < ifile->nb_streams; i++) {
 4313  ist = input_streams[ifile->ist_index + i];
 4314  if (ist->decoding_needed) {
 4315  ret = process_input_packet(ist, NULL, 0);
 4316  if (ret>0)
 4317  return 0;
 4318  }
 4319 
 4320  /* mark all outputs that don't go through lavfi as finished */
 4321  for (j = 0; j < nb_output_streams; j++) {
 4322  OutputStream *ost = output_streams[j];
 4323 
 4324  if (ost->source_index == ifile->ist_index + i &&
 4325  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
 4326  finish_output_stream(ost);
 4327  }
 4328  }
 4329 
 4330  ifile->eof_reached = 1;
 4331  return AVERROR(EAGAIN);
 4332  }
 4333 
 4334  reset_eagain();
 4335 
 4336  if (do_pkt_dump) {
 /* NOTE(review): the pkt-dump call line (original 4337, presumably
  * av_pkt_dump_log2(...)) is missing from this listing. */
 4338  is->streams[pkt.stream_index]);
 4339  }
 4340  /* the following test is needed in case new streams appear
 4341  dynamically in stream : we ignore them */
 4342  if (pkt.stream_index >= ifile->nb_streams) {
 4343  report_new_stream(file_index, &pkt);
 4344  goto discard_packet;
 4345  }
 4346 
 4347  ist = input_streams[ifile->ist_index + pkt.stream_index];
 4348 
 /* per-stream input statistics */
 4349  ist->data_size += pkt.size;
 4350  ist->nb_packets++;
 4351 
 4352  if (ist->discard)
 4353  goto discard_packet;
 4354 
 4355  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
 /* NOTE(review): the av_log(...) line opening this message is missing
  * (original line 4356). */
 4357  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
 4358  if (exit_on_error)
 4359  exit_program(1);
 4360  }
 4361 
 4362  if (debug_ts) {
 4363  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
 4364  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
 /* NOTE(review): the argument lines for ist_index/type/next_dts/next_pts
  * (original lines 4365-4367) are missing from this listing. */
 4368  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 4369  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
 4370  av_ts2str(input_files[ist->file_index]->ts_offset),
 4371  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
 4372  }
 4373 
 /* Timestamp wrap handling (pts_wrap_bits < 64): shift timestamps that
  * wrapped around back below the wrap point relative to the start time. */
 4374  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
 4375  int64_t stime, stime2;
 4376  // Correcting starttime based on the enabled streams
 4377  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
 4378  // so we instead do it here as part of discontinuity handling
 4379  if ( ist->next_dts == AV_NOPTS_VALUE
 4380  && ifile->ts_offset == -is->start_time
 4381  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
 4382  int64_t new_start_time = INT64_MAX;
 4383  for (i=0; i<is->nb_streams; i++) {
 4384  AVStream *st = is->streams[i];
 4385  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
 4386  continue;
 4387  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
 4388  }
 4389  if (new_start_time > is->start_time) {
 4390  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
 4391  ifile->ts_offset = -new_start_time;
 4392  }
 4393  }
 4394 
 4395  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
 4396  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
 4397  ist->wrap_correction_done = 1;
 4398 
 4399  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
 4400  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
 4401  ist->wrap_correction_done = 0;
 4402  }
 4403  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
 4404  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
 4405  ist->wrap_correction_done = 0;
 4406  }
 4407  }
 4408 
 4409  /* add the stream-global side data to the first packet */
 4410  if (ist->nb_packets == 1) {
 4411  for (i = 0; i < ist->st->nb_side_data; i++) {
 4412  AVPacketSideData *src_sd = &ist->st->side_data[i];
 4413  uint8_t *dst_data;
 4414 
 4415  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
 4416  continue;
 4417 
 /* don't overwrite side data the packet already carries */
 4418  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
 4419  continue;
 4420 
 4421  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
 4422  if (!dst_data)
 4423  exit_program(1);
 4424 
 4425  memcpy(dst_data, src_sd->data, src_sd->size);
 4426  }
 4427  }
 4428 
 /* apply the accumulated input timestamp offset, then -itsscale */
 4429  if (pkt.dts != AV_NOPTS_VALUE)
 4430  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 4431  if (pkt.pts != AV_NOPTS_VALUE)
 4432  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
 4433 
 4434  if (pkt.pts != AV_NOPTS_VALUE)
 4435  pkt.pts *= ist->ts_scale;
 4436  if (pkt.dts != AV_NOPTS_VALUE)
 4437  pkt.dts *= ist->ts_scale;
 4438 
 /* NOTE(review): original line 4439 (presumably the pkt_dts rescale into
  * AV_TIME_BASE_Q) and line 4441 (presumably the AVMEDIA_TYPE_AUDIO half
  * of this condition) are missing from this listing. */
 4440  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
 4442  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
 4443  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
 4444  int64_t delta = pkt_dts - ifile->last_ts;
 4445  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 4446  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
 4447  ifile->ts_offset -= delta;
 /* NOTE(review): the av_log(...) line opening this message is missing
  * (original line 4448). */
 4449  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
 4450  delta, ifile->ts_offset);
 4451  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 4452  if (pkt.pts != AV_NOPTS_VALUE)
 4453  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 4454  }
 4455  }
 4456 
 /* -stream_loop: shift this iteration's timestamps past the accumulated
  * duration of previous iterations. */
 4457  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
 4458  if (pkt.pts != AV_NOPTS_VALUE) {
 4459  pkt.pts += duration;
 4460  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
 4461  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
 4462  }
 4463 
 4464  if (pkt.dts != AV_NOPTS_VALUE)
 4465  pkt.dts += duration;
 4466 
 /* NOTE(review): original line 4467 (presumably re-computing pkt_dts after
  * the adjustments above) is missing from this listing. */
 4468 
 /* Under -copyts, still allow discontinuity correction when the dts jump
  * looks like a timestamp wrap rather than a real discontinuity. */
 4469  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
 4470  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
 4471  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
 4472  ist->st->time_base, AV_TIME_BASE_Q,
 /* NOTE(review): the rounding-mode argument line is missing
  * (original line 4473). */
 4474  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
 4475  disable_discontinuity_correction = 0;
 4476  }
 4477 
 4478  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
 /* NOTE(review): the second half of this condition (presumably the
  * AVMEDIA_TYPE_AUDIO check, original line 4479) is missing. */
 4480  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
 4481  !disable_discontinuity_correction) {
 4482  int64_t delta = pkt_dts - ist->next_dts;
 4483  if (is->iformat->flags & AVFMT_TS_DISCONT) {
 4484  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
 4485  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
 4486  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
 4487  ifile->ts_offset -= delta;
 /* NOTE(review): the av_log(...) line opening this message is missing
  * (original line 4488). */
 4489  "timestamp discontinuity for stream #%d:%d "
 4490  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
 4491  ist->file_index, ist->st->index, ist->st->id,
 /* NOTE(review): the media-type-string argument line is missing
  * (original line 4492). */
 4493  delta, ifile->ts_offset);
 4494  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 4495  if (pkt.pts != AV_NOPTS_VALUE)
 4496  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
 4497  }
 4498  } else {
 /* Non-discontinuous container: timestamps beyond the error threshold are
  * treated as invalid and dropped instead of offset-corrected. */
 4499  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 4500  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
 4501  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
 4502  pkt.dts = AV_NOPTS_VALUE;
 4503  }
 4504  if (pkt.pts != AV_NOPTS_VALUE){
 4505  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
 4506  delta = pkt_pts - ist->next_dts;
 4507  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
 4508  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
 4509  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
 4510  pkt.pts = AV_NOPTS_VALUE;
 4511  }
 4512  }
 4513  }
 4514  }
 4515 
 /* remember the last seen dts for the inter-stream discontinuity check */
 4516  if (pkt.dts != AV_NOPTS_VALUE)
 4517  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
 4518 
 4519  if (debug_ts) {
 4520  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
 /* NOTE(review): the ist_index/type argument line is missing
  * (original line 4521). */
 4522  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
 4523  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
 4524  av_ts2str(input_files[ist->file_index]->ts_offset),
 4525  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
 4526  }
 4527 
 /* keep subtitle-to-video (sub2video) inputs advancing even when no new
  * subtitle packet arrives */
 4528  sub2video_heartbeat(ist, pkt.pts);
 4529 
 4530  process_input_packet(ist, &pkt, 0);
 4531 
 4532 discard_packet:
 4533  av_packet_unref(&pkt);
 4534 
 4535  return 0;
 4536 }
4537 
4538 /**
4539  * Perform a step of transcoding for the specified filter graph.
4540  *
4541  * @param[in] graph filter graph to consider
4542  * @param[out] best_ist input stream where a frame would allow to continue
4543  * @return 0 for success, <0 for error
4544  */
4545 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4546 {
4547  int i, ret;
4548  int nb_requests, nb_requests_max = 0;
4549  InputFilter *ifilter;
4550  InputStream *ist;
4551 
4552  *best_ist = NULL;
4553  ret = avfilter_graph_request_oldest(graph->graph);
4554  if (ret >= 0)
4555  return reap_filters(0);
4556 
4557  if (ret == AVERROR_EOF) {
4558  ret = reap_filters(1);
4559  for (i = 0; i < graph->nb_outputs; i++)
4560  close_output_stream(graph->outputs[i]->ost);
4561  return ret;