FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Log file for per-frame video statistics (-vstats); opened elsewhere,
   closed during cleanup. NULL when the option is not in use. */
static FILE *vstats_file;

/* Names of the constants exposed to the forced-keyframes expression
   evaluator (per the array name; the evaluation site is outside this
   chunk -- verify against the -force_key_frames handling). The list is
   NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Nonzero when ffmpeg runs detached from a terminal -- NOTE(review): the
   assignment site is outside this chunk; confirm against option parsing. */
static int run_as_daemon = 0;
/* Running total of frames duplicated by video sync (incremented in
   do_video_out). */
static int nb_frames_dup = 0;
/* Duplication count that triggers the next "More than %d frames duplicated"
   warning; multiplied by 10 each time it fires (see do_video_out). */
static unsigned dup_warning = 1000;
/* Running total of frames dropped by video sync (incremented in
   do_video_out). */
static int nb_frames_drop = 0;
/* Decode outcome counters -- presumably [successes, errors]; verify at the
   usage site, which is outside this chunk. */
static int64_t decode_error_stat[2];

/* Whether an SDP description should be emitted -- TODO confirm usage site. */
static int want_sdp = 1;
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
240 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
257  /* If we are initializing the system, utilize current heartbeat
258  PTS as the start time, and show until the following subpicture
259  is received. Otherwise, utilize the previous subpicture's end time
260  as the fall-back value. */
261  pts = ist->sub2video.initialize ?
262  heartbeat_pts : ist->sub2video.end_pts;
263  end_pts = INT64_MAX;
264  num_rects = 0;
265  }
266  if (sub2video_get_blank_frame(ist) < 0) {
268  "Impossible to get a blank canvas.\n");
269  return;
270  }
271  dst = frame->data [0];
272  dst_linesize = frame->linesize[0];
273  for (i = 0; i < num_rects; i++)
274  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
275  sub2video_push_ref(ist, pts);
276  ist->sub2video.end_pts = end_pts;
277  ist->sub2video.initialize = 0;
278 }
279 
/**
 * Keep sub2video streams of the same input file supplied with frames.
 *
 * Called with the timestamp (pts, in ist->st->time_base) of a frame just
 * read from ist's file; re-pushes the currently displayed subpicture to
 * every starved sub2video stream of that file so video filtering can make
 * progress.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        /* only streams that were set up for sub2video have a frame */
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* push the frame only if some downstream filter actually failed a
           request for input, i.e. is starved for frames */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Restore the terminal attributes saved in oldtty by term_init().
   Kept minimal and async-signal-safe: it is invoked from signal-handling
   paths, so it must not call av_log(), allocate, or touch non-volatile
   state beyond the tty flags. No-op when the tty was never modified. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335 
336 void term_exit(void)
337 {
338  av_log(NULL, AV_LOG_QUIET, "%s", "");
340 }
341 
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
347 
348 static void
350 {
351  int ret;
352  received_sigterm = sig;
355  if(received_nb_signals > 3) {
356  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
357  strlen("Received > 3 system signals, hard exiting\n"));
358  if (ret < 0) { /* Do nothing */ };
359  exit(123);
360  }
361 }
362 
363 #if HAVE_SETCONSOLECTRLHANDLER
364 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
365 {
366  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
367 
368  switch (fdwCtrlType)
369  {
370  case CTRL_C_EVENT:
371  case CTRL_BREAK_EVENT:
372  sigterm_handler(SIGINT);
373  return TRUE;
374 
375  case CTRL_CLOSE_EVENT:
376  case CTRL_LOGOFF_EVENT:
377  case CTRL_SHUTDOWN_EVENT:
378  sigterm_handler(SIGTERM);
379  /* Basically, with these 3 events, when we return from this method the
380  process is hard terminated, so stall as long as we need to
381  to try and let the main thread(s) clean up and gracefully terminate
382  (we have at most 5 seconds, but should be done far before that). */
383  while (!ffmpeg_exited) {
384  Sleep(0);
385  }
386  return TRUE;
387 
388  default:
389  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
390  return FALSE;
391  }
392 }
393 #endif
394 
395 void term_init(void)
396 {
397 #if HAVE_TERMIOS_H
399  struct termios tty;
400  if (tcgetattr (0, &tty) == 0) {
401  oldtty = tty;
402  restore_tty = 1;
403 
404  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
405  |INLCR|IGNCR|ICRNL|IXON);
406  tty.c_oflag |= OPOST;
407  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
408  tty.c_cflag &= ~(CSIZE|PARENB);
409  tty.c_cflag |= CS8;
410  tty.c_cc[VMIN] = 1;
411  tty.c_cc[VTIME] = 0;
412 
413  tcsetattr (0, TCSANOW, &tty);
414  }
415  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
416  }
417 #endif
418 
419  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
420  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
421 #ifdef SIGXCPU
422  signal(SIGXCPU, sigterm_handler);
423 #endif
424 #ifdef SIGPIPE
425  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
426 #endif
427 #if HAVE_SETCONSOLECTRLHANDLER
428  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
429 #endif
430 }
431 
432 /* read a key without blocking */
/* read a key without blocking */
/* Poll stdin for a single keypress and return it, or a negative/zero value
   when nothing is available. Three platform paths:
   - termios: zero-timeout select() on fd 0, then a 1-byte read;
   - Windows pipe (PeekNamedPipe): handles stdin redirected by a GUI/parent;
   - Windows console (kbhit/getch). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: select() returns immediately, so this never blocks */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        /* read error or EOF: propagate read()'s return value (0 or -1) */
        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    /* cached across calls: the stdin handle and whether it is a pipe */
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode() fails on non-console handles => stdin is a pipe */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
483 
484 static int decode_interrupt_cb(void *ctx)
485 {
487 }
488 
490 
491 static void ffmpeg_cleanup(int ret)
492 {
493  int i, j;
494 
495  if (do_benchmark) {
496  int maxrss = getmaxrss() / 1024;
497  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
498  }
499 
500  for (i = 0; i < nb_filtergraphs; i++) {
501  FilterGraph *fg = filtergraphs[i];
503  for (j = 0; j < fg->nb_inputs; j++) {
504  InputFilter *ifilter = fg->inputs[j];
505  struct InputStream *ist = ifilter->ist;
506 
507  while (av_fifo_size(ifilter->frame_queue)) {
508  AVFrame *frame;
509  av_fifo_generic_read(ifilter->frame_queue, &frame,
510  sizeof(frame), NULL);
511  av_frame_free(&frame);
512  }
513  av_fifo_freep(&ifilter->frame_queue);
514  if (ist->sub2video.sub_queue) {
515  while (av_fifo_size(ist->sub2video.sub_queue)) {
516  AVSubtitle sub;
518  &sub, sizeof(sub), NULL);
519  avsubtitle_free(&sub);
520  }
522  }
523  av_buffer_unref(&ifilter->hw_frames_ctx);
524  av_freep(&ifilter->name);
525  av_freep(&fg->inputs[j]);
526  }
527  av_freep(&fg->inputs);
528  for (j = 0; j < fg->nb_outputs; j++) {
529  OutputFilter *ofilter = fg->outputs[j];
530 
531  avfilter_inout_free(&ofilter->out_tmp);
532  av_freep(&ofilter->name);
533  av_freep(&ofilter->formats);
534  av_freep(&ofilter->channel_layouts);
535  av_freep(&ofilter->sample_rates);
536  av_freep(&fg->outputs[j]);
537  }
538  av_freep(&fg->outputs);
539  av_freep(&fg->graph_desc);
540 
541  av_freep(&filtergraphs[i]);
542  }
543  av_freep(&filtergraphs);
544 
546 
547  /* close files */
548  for (i = 0; i < nb_output_files; i++) {
549  OutputFile *of = output_files[i];
551  if (!of)
552  continue;
553  s = of->ctx;
554  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
555  avio_closep(&s->pb);
557  av_dict_free(&of->opts);
558 
559  av_freep(&output_files[i]);
560  }
561  for (i = 0; i < nb_output_streams; i++) {
562  OutputStream *ost = output_streams[i];
563 
564  if (!ost)
565  continue;
566 
567  av_bsf_free(&ost->bsf_ctx);
568 
570  av_frame_free(&ost->last_frame);
571  av_dict_free(&ost->encoder_opts);
572 
573  av_freep(&ost->forced_keyframes);
575  av_freep(&ost->avfilter);
576  av_freep(&ost->logfile_prefix);
577 
579  ost->audio_channels_mapped = 0;
580 
581  av_dict_free(&ost->sws_dict);
582  av_dict_free(&ost->swr_opts);
583 
586 
587  if (ost->muxing_queue) {
588  while (av_fifo_size(ost->muxing_queue)) {
589  AVPacket pkt;
590  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
591  av_packet_unref(&pkt);
592  }
594  }
595 
596  av_freep(&output_streams[i]);
597  }
598 #if HAVE_THREADS
599  free_input_threads();
600 #endif
601  for (i = 0; i < nb_input_files; i++) {
602  avformat_close_input(&input_files[i]->ctx);
603  av_freep(&input_files[i]);
604  }
605  for (i = 0; i < nb_input_streams; i++) {
606  InputStream *ist = input_streams[i];
607 
610  av_dict_free(&ist->decoder_opts);
613  av_freep(&ist->filters);
614  av_freep(&ist->hwaccel_device);
615  av_freep(&ist->dts_buffer);
616 
618 
619  av_freep(&input_streams[i]);
620  }
621 
622  if (vstats_file) {
623  if (fclose(vstats_file))
625  "Error closing vstats file, loss of information possible: %s\n",
626  av_err2str(AVERROR(errno)));
627  }
629 
630  av_freep(&input_streams);
631  av_freep(&input_files);
632  av_freep(&output_streams);
633  av_freep(&output_files);
634 
635  uninit_opts();
636 
638 
639  if (received_sigterm) {
640  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
641  (int) received_sigterm);
642  } else if (ret && atomic_load(&transcode_init_done)) {
643  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
644  }
645  term_exit();
646  ffmpeg_exited = 1;
647 }
648 
650 {
651  AVDictionaryEntry *t = NULL;
652 
653  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
655  }
656 }
657 
659 {
661  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
662  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
663  exit_program(1);
664  }
665 }
666 
/* Abort the program when an experimental codec was selected (per the
   function name; the call sites are outside this chunk). Both parameters
   are unused in this body -- presumably kept for diagnostic use by callers;
   exits with status 1 via exit_program(). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
671 
672 static void update_benchmark(const char *fmt, ...)
673 {
674  if (do_benchmark_all) {
676  va_list va;
677  char buf[1024];
678 
679  if (fmt) {
680  va_start(va, fmt);
681  vsnprintf(buf, sizeof(buf), fmt, va);
682  va_end(va);
684  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
685  t.user_usec - current_time.user_usec,
686  t.sys_usec - current_time.sys_usec,
687  t.real_usec - current_time.real_usec, buf);
688  }
689  current_time = t;
690  }
691 }
692 
694 {
695  int i;
696  for (i = 0; i < nb_output_streams; i++) {
697  OutputStream *ost2 = output_streams[i];
698  ost2->finished |= ost == ost2 ? this_stream : others;
699  }
700 }
701 
702 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
703 {
704  AVFormatContext *s = of->ctx;
705  AVStream *st = ost->st;
706  int ret;
707 
708  /*
709  * Audio encoders may split the packets -- #frames in != #packets out.
710  * But there is no reordering, so we can limit the number of output packets
711  * by simply dropping them here.
712  * Counting encoded video frames needs to be done separately because of
713  * reordering, see do_video_out().
714  * Do not count the packet when unqueued because it has been counted when queued.
715  */
716  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
717  if (ost->frame_number >= ost->max_frames) {
718  av_packet_unref(pkt);
719  return;
720  }
721  ost->frame_number++;
722  }
723 
724  if (!of->header_written) {
725  AVPacket tmp_pkt = {0};
726  /* the muxer is not initialized yet, buffer the packet */
727  if (!av_fifo_space(ost->muxing_queue)) {
728  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
729  ost->max_muxing_queue_size);
730  if (new_size <= av_fifo_size(ost->muxing_queue)) {
732  "Too many packets buffered for output stream %d:%d.\n",
733  ost->file_index, ost->st->index);
734  exit_program(1);
735  }
736  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
737  if (ret < 0)
738  exit_program(1);
739  }
740  ret = av_packet_make_refcounted(pkt);
741  if (ret < 0)
742  exit_program(1);
743  av_packet_move_ref(&tmp_pkt, pkt);
744  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
745  return;
746  }
747 
750  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
751 
752  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
753  int i;
755  NULL);
756  ost->quality = sd ? AV_RL32(sd) : -1;
757  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
758 
759  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
760  if (sd && i < sd[5])
761  ost->error[i] = AV_RL64(sd + 8 + 8*i);
762  else
763  ost->error[i] = -1;
764  }
765 
766  if (ost->frame_rate.num && ost->is_cfr) {
767  if (pkt->duration > 0)
768  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
769  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
770  ost->mux_timebase);
771  }
772  }
773 
774  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
775 
776  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
777  if (pkt->dts != AV_NOPTS_VALUE &&
778  pkt->pts != AV_NOPTS_VALUE &&
779  pkt->dts > pkt->pts) {
780  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
781  pkt->dts, pkt->pts,
782  ost->file_index, ost->st->index);
783  pkt->pts =
784  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
785  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
786  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
787  }
789  pkt->dts != AV_NOPTS_VALUE &&
790  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
791  ost->last_mux_dts != AV_NOPTS_VALUE) {
792  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
793  if (pkt->dts < max) {
794  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
795  if (exit_on_error)
796  loglevel = AV_LOG_ERROR;
797  av_log(s, loglevel, "Non-monotonous DTS in output stream "
798  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
799  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
800  if (exit_on_error) {
801  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
802  exit_program(1);
803  }
804  av_log(s, loglevel, "changing to %"PRId64". This may result "
805  "in incorrect timestamps in the output file.\n",
806  max);
807  if (pkt->pts >= pkt->dts)
808  pkt->pts = FFMAX(pkt->pts, max);
809  pkt->dts = max;
810  }
811  }
812  }
813  ost->last_mux_dts = pkt->dts;
814 
815  ost->data_size += pkt->size;
816  ost->packets_written++;
817 
818  pkt->stream_index = ost->index;
819 
820  if (debug_ts) {
821  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
822  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
824  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
825  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
826  pkt->size
827  );
828  }
829 
830  ret = av_interleaved_write_frame(s, pkt);
831  if (ret < 0) {
832  print_error("av_interleaved_write_frame()", ret);
833  main_return_code = 1;
835  }
836  av_packet_unref(pkt);
837 }
838 
840 {
841  OutputFile *of = output_files[ost->file_index];
842 
843  ost->finished |= ENCODER_FINISHED;
844  if (of->shortest) {
845  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
846  of->recording_time = FFMIN(of->recording_time, end);
847  }
848 }
849 
850 /*
851  * Send a single packet to the output, applying any bitstream filters
852  * associated with the output stream. This may result in any number
853  * of packets actually being written, depending on what bitstream
854  * filters are applied. The supplied packet is consumed and will be
855  * blank (as if newly-allocated) when this function returns.
856  *
857  * If eof is set, instead indicate EOF to all bitstream filters and
858  * therefore flush any delayed packets to the output. A blank packet
859  * must be supplied in this case.
860  */
862  OutputStream *ost, int eof)
863 {
864  int ret = 0;
865 
866  /* apply the output bitstream filters */
867  if (ost->bsf_ctx) {
868  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
869  if (ret < 0)
870  goto finish;
871  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
872  write_packet(of, pkt, ost, 0);
873  if (ret == AVERROR(EAGAIN))
874  ret = 0;
875  } else if (!eof)
876  write_packet(of, pkt, ost, 0);
877 
878 finish:
879  if (ret < 0 && ret != AVERROR_EOF) {
880  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
881  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
882  if(exit_on_error)
883  exit_program(1);
884  }
885 }
886 
888 {
889  OutputFile *of = output_files[ost->file_index];
890 
891  if (of->recording_time != INT64_MAX &&
893  AV_TIME_BASE_Q) >= 0) {
894  close_output_stream(ost);
895  return 0;
896  }
897  return 1;
898 }
899 
901  AVFrame *frame)
902 {
903  AVCodecContext *enc = ost->enc_ctx;
904  AVPacket pkt;
905  int ret;
906 
907  av_init_packet(&pkt);
908  pkt.data = NULL;
909  pkt.size = 0;
910 
911  if (!check_recording_time(ost))
912  return;
913 
914  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
915  frame->pts = ost->sync_opts;
916  ost->sync_opts = frame->pts + frame->nb_samples;
917  ost->samples_encoded += frame->nb_samples;
918  ost->frames_encoded++;
919 
920  av_assert0(pkt.size || !pkt.data);
922  if (debug_ts) {
923  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
924  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
925  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
926  enc->time_base.num, enc->time_base.den);
927  }
928 
929  ret = avcodec_send_frame(enc, frame);
930  if (ret < 0)
931  goto error;
932 
933  while (1) {
934  ret = avcodec_receive_packet(enc, &pkt);
935  if (ret == AVERROR(EAGAIN))
936  break;
937  if (ret < 0)
938  goto error;
939 
940  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
941 
942  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
943 
944  if (debug_ts) {
945  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
946  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
947  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
948  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
949  }
950 
951  output_packet(of, &pkt, ost, 0);
952  }
953 
954  return;
955 error:
956  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
957  exit_program(1);
958 }
959 
960 static void do_subtitle_out(OutputFile *of,
961  OutputStream *ost,
962  AVSubtitle *sub)
963 {
964  int subtitle_out_max_size = 1024 * 1024;
965  int subtitle_out_size, nb, i;
966  AVCodecContext *enc;
967  AVPacket pkt;
968  int64_t pts;
969 
970  if (sub->pts == AV_NOPTS_VALUE) {
971  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
972  if (exit_on_error)
973  exit_program(1);
974  return;
975  }
976 
977  enc = ost->enc_ctx;
978 
979  if (!subtitle_out) {
980  subtitle_out = av_malloc(subtitle_out_max_size);
981  if (!subtitle_out) {
982  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
983  exit_program(1);
984  }
985  }
986 
987  /* Note: DVB subtitle need one packet to draw them and one other
988  packet to clear them */
989  /* XXX: signal it in the codec context ? */
991  nb = 2;
992  else
993  nb = 1;
994 
995  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
996  pts = sub->pts;
997  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
998  pts -= output_files[ost->file_index]->start_time;
999  for (i = 0; i < nb; i++) {
1000  unsigned save_num_rects = sub->num_rects;
1001 
1002  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1003  if (!check_recording_time(ost))
1004  return;
1005 
1006  sub->pts = pts;
1007  // start_display_time is required to be 0
1008  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1009  sub->end_display_time -= sub->start_display_time;
1010  sub->start_display_time = 0;
1011  if (i == 1)
1012  sub->num_rects = 0;
1013 
1014  ost->frames_encoded++;
1015 
1016  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1017  subtitle_out_max_size, sub);
1018  if (i == 1)
1019  sub->num_rects = save_num_rects;
1020  if (subtitle_out_size < 0) {
1021  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1022  exit_program(1);
1023  }
1024 
1025  av_init_packet(&pkt);
1026  pkt.data = subtitle_out;
1027  pkt.size = subtitle_out_size;
1028  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1029  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1030  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1031  /* XXX: the pts correction is handled here. Maybe handling
1032  it in the codec would be better */
1033  if (i == 0)
1034  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1035  else
1036  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1037  }
1038  pkt.dts = pkt.pts;
1039  output_packet(of, &pkt, ost, 0);
1040  }
1041 }
1042 
1043 static void do_video_out(OutputFile *of,
1044  OutputStream *ost,
1045  AVFrame *next_picture,
1046  double sync_ipts)
1047 {
1048  int ret, format_video_sync;
1049  AVPacket pkt;
1050  AVCodecContext *enc = ost->enc_ctx;
1051  AVCodecParameters *mux_par = ost->st->codecpar;
1052  AVRational frame_rate;
1053  int nb_frames, nb0_frames, i;
1054  double delta, delta0;
1055  double duration = 0;
1056  int frame_size = 0;
1057  InputStream *ist = NULL;
1059 
1060  if (ost->source_index >= 0)
1061  ist = input_streams[ost->source_index];
1062 
1063  frame_rate = av_buffersink_get_frame_rate(filter);
1064  if (frame_rate.num > 0 && frame_rate.den > 0)
1065  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1066 
1067  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1068  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1069 
1070  if (!ost->filters_script &&
1071  !ost->filters &&
1072  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1073  next_picture &&
1074  ist &&
1075  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1076  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1077  }
1078 
1079  if (!next_picture) {
1080  //end, flushing
1081  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1082  ost->last_nb0_frames[1],
1083  ost->last_nb0_frames[2]);
1084  } else {
1085  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1086  delta = delta0 + duration;
1087 
1088  /* by default, we output a single frame */
1089  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1090  nb_frames = 1;
1091 
1092  format_video_sync = video_sync_method;
1093  if (format_video_sync == VSYNC_AUTO) {
1094  if(!strcmp(of->ctx->oformat->name, "avi")) {
1095  format_video_sync = VSYNC_VFR;
1096  } else
1097  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1098  if ( ist
1099  && format_video_sync == VSYNC_CFR
1100  && input_files[ist->file_index]->ctx->nb_streams == 1
1101  && input_files[ist->file_index]->input_ts_offset == 0) {
1102  format_video_sync = VSYNC_VSCFR;
1103  }
1104  if (format_video_sync == VSYNC_CFR && copy_ts) {
1105  format_video_sync = VSYNC_VSCFR;
1106  }
1107  }
1108  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1109 
1110  if (delta0 < 0 &&
1111  delta > 0 &&
1112  format_video_sync != VSYNC_PASSTHROUGH &&
1113  format_video_sync != VSYNC_DROP) {
1114  if (delta0 < -0.6) {
1115  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1116  } else
1117  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1118  sync_ipts = ost->sync_opts;
1119  duration += delta0;
1120  delta0 = 0;
1121  }
1122 
1123  switch (format_video_sync) {
1124  case VSYNC_VSCFR:
1125  if (ost->frame_number == 0 && delta0 >= 0.5) {
1126  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1127  delta = duration;
1128  delta0 = 0;
1129  ost->sync_opts = llrint(sync_ipts);
1130  }
1131  case VSYNC_CFR:
1132  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1133  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1134  nb_frames = 0;
1135  } else if (delta < -1.1)
1136  nb_frames = 0;
1137  else if (delta > 1.1) {
1138  nb_frames = lrintf(delta);
1139  if (delta0 > 1.1)
1140  nb0_frames = llrintf(delta0 - 0.6);
1141  }
1142  break;
1143  case VSYNC_VFR:
1144  if (delta <= -0.6)
1145  nb_frames = 0;
1146  else if (delta > 0.6)
1147  ost->sync_opts = llrint(sync_ipts);
1148  break;
1149  case VSYNC_DROP:
1150  case VSYNC_PASSTHROUGH:
1151  ost->sync_opts = llrint(sync_ipts);
1152  break;
1153  default:
1154  av_assert0(0);
1155  }
1156  }
1157 
1158  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1159  nb0_frames = FFMIN(nb0_frames, nb_frames);
1160 
1161  memmove(ost->last_nb0_frames + 1,
1162  ost->last_nb0_frames,
1163  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1164  ost->last_nb0_frames[0] = nb0_frames;
1165 
1166  if (nb0_frames == 0 && ost->last_dropped) {
1167  nb_frames_drop++;
1169  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1170  ost->frame_number, ost->st->index, ost->last_frame->pts);
1171  }
1172  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1173  if (nb_frames > dts_error_threshold * 30) {
1174  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1175  nb_frames_drop++;
1176  return;
1177  }
1178  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1179  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1180  if (nb_frames_dup > dup_warning) {
1181  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1182  dup_warning *= 10;
1183  }
1184  }
1185  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1186 
1187  /* duplicates frame if needed */
1188  for (i = 0; i < nb_frames; i++) {
1189  AVFrame *in_picture;
1190  int forced_keyframe = 0;
1191  double pts_time;
1192  av_init_packet(&pkt);
1193  pkt.data = NULL;
1194  pkt.size = 0;
1195 
1196  if (i < nb0_frames && ost->last_frame) {
1197  in_picture = ost->last_frame;
1198  } else
1199  in_picture = next_picture;
1200 
1201  if (!in_picture)
1202  return;
1203 
1204  in_picture->pts = ost->sync_opts;
1205 
1206  if (!check_recording_time(ost))
1207  return;
1208 
1210  ost->top_field_first >= 0)
1211  in_picture->top_field_first = !!ost->top_field_first;
1212 
1213  if (in_picture->interlaced_frame) {
1214  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1215  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1216  else
1217  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1218  } else
1219  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1220 
1221  in_picture->quality = enc->global_quality;
1222  in_picture->pict_type = 0;
1223 
1224  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1225  in_picture->pts != AV_NOPTS_VALUE)
1226  ost->forced_kf_ref_pts = in_picture->pts;
1227 
1228  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1229  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1230  if (ost->forced_kf_index < ost->forced_kf_count &&
1231  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1232  ost->forced_kf_index++;
1233  forced_keyframe = 1;
1234  } else if (ost->forced_keyframes_pexpr) {
1235  double res;
1236  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1239  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1245  res);
1246  if (res) {
1247  forced_keyframe = 1;
1253  }
1254 
1256  } else if ( ost->forced_keyframes
1257  && !strncmp(ost->forced_keyframes, "source", 6)
1258  && in_picture->key_frame==1
1259  && !i) {
1260  forced_keyframe = 1;
1261  }
1262 
1263  if (forced_keyframe) {
1264  in_picture->pict_type = AV_PICTURE_TYPE_I;
1265  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1266  }
1267 
1269  if (debug_ts) {
1270  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1271  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1272  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1273  enc->time_base.num, enc->time_base.den);
1274  }
1275 
1276  ost->frames_encoded++;
1277 
1278  ret = avcodec_send_frame(enc, in_picture);
1279  if (ret < 0)
1280  goto error;
1281  // Make sure Closed Captions will not be duplicated
1283 
1284  while (1) {
1285  ret = avcodec_receive_packet(enc, &pkt);
1286  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1287  if (ret == AVERROR(EAGAIN))
1288  break;
1289  if (ret < 0)
1290  goto error;
1291 
1292  if (debug_ts) {
1293  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1294  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1295  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1296  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1297  }
1298 
1299  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1300  pkt.pts = ost->sync_opts;
1301 
1302  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1303 
1304  if (debug_ts) {
1305  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1306  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1308  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1309  }
1310 
1311  frame_size = pkt.size;
1312  output_packet(of, &pkt, ost, 0);
1313 
1314  /* if two pass, output log */
1315  if (ost->logfile && enc->stats_out) {
1316  fprintf(ost->logfile, "%s", enc->stats_out);
1317  }
1318  }
1319  ost->sync_opts++;
1320  /*
1321  * For video, number of frames in == number of packets out.
1322  * But there may be reordering, so we can't throw away frames on encoder
1323  * flush, we need to limit them here, before they go into encoder.
1324  */
1325  ost->frame_number++;
1326 
1327  if (vstats_filename && frame_size)
1328  do_video_stats(ost, frame_size);
1329  }
1330 
1331  if (!ost->last_frame)
1332  ost->last_frame = av_frame_alloc();
1333  av_frame_unref(ost->last_frame);
1334  if (next_picture && ost->last_frame)
1335  av_frame_ref(ost->last_frame, next_picture);
1336  else
1337  av_frame_free(&ost->last_frame);
1338 
1339  return;
1340 error:
1341  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1342  exit_program(1);
1343 }
1344 
1345 static double psnr(double d)
1346 {
1347  return -10.0 * log10(d);
1348 }
1349 
/* Append one line of per-frame video statistics (quality, PSNR, sizes,
 * bitrates) to the file named by -vstats_file, opening it lazily on the
 * first call.
 * NOTE(review): the signature line was lost in this extraction; the body
 * reads an OutputStream 'ost' and an int 'frame_size' — confirm against
 * upstream ffmpeg.c. */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 2 additionally records which output file/stream
         * the line belongs to */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01)
            ti1 = 0.01; /* clamp to avoid division blow-up for very early frames */

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1392 
1393 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1394 
1395 static int init_output_stream_wrapper(OutputStream *ost, unsigned int fatal)
1396 {
1397  int ret = AVERROR_BUG;
1398  char error[1024] = {0};
1399 
1400  if (ost->initialized)
1401  return 0;
1402 
1403  ret = init_output_stream(ost, error, sizeof(error));
1404  if (ret < 0) {
1405  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1406  ost->file_index, ost->index, error);
1407 
1408  if (fatal)
1409  exit_program(1);
1410  }
1411 
1412  return ret;
1413 }
1414 
/* Mark an output stream as finished.
 * NOTE(review): the signature line was lost in this extraction (the body
 * reads an OutputStream 'ost'), and a statement — presumably flagging
 * 'ost' itself as finished — appears to be missing below; confirm against
 * upstream ffmpeg.c. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    /* with -shortest, the first stream to finish terminates every stream
     * of the same output file */
    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1427 
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * NOTE(review): several source lines were lost in this extraction (a
 * local declaration, a buffersink flag argument, and two av_log call
 * headers); the orphan fragments below are kept verbatim and flagged
 * inline — compare against upstream ffmpeg.c before building.
 *
 * @param flush if nonzero, push a NULL frame into do_video_out() when a
 *              video buffersink reports EOF, flushing the encoder
 * @return 0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter; /* NOTE(review): declaration of 'filter' lost in extraction */

        /* lazily allocate the reusable output frame */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        /* drain every frame currently buffered in this sink */
        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            /* NOTE(review): the flags argument of the call above was lost in extraction */
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): av_log call header lost in extraction */
                    "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NOTE(review): a guard line before this call was lost in extraction */
                    do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* widen the timebase denominator so float_pts keeps extra
                 * fractional precision over the integer pts */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                           av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                           float_pts,
                           enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): av_log call header lost in extraction */
                    "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1529 
/**
 * Print the final summary after transcoding: per-type output sizes and
 * muxing overhead at INFO level, then detailed per-stream packet/frame
 * counts at VERBOSE level.
 *
 * NOTE(review): the condition guarding 'pass1_used = 0' was lost in this
 * extraction; confirm against upstream ffmpeg.c.
 *
 * @param total_size total number of bytes written to the output
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* accumulate per-media-type byte counts over all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): guard condition lost in extraction */
            pass1_used = 0;
    }

    /* muxing overhead as a percentage of the payload size */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* warn when nothing at all was written */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1646 
/**
 * Print the periodic progress line ("frame= ... fps= ... bitrate= ...")
 * and, when -progress is in use, write machine-readable key=value progress
 * to progress_avio. Rate-limited to one update per 500ms unless this is
 * the final report.
 *
 * NOTE(review): a few source lines were lost in this extraction (the
 * av_bprint_init of 'buf', two guard conditions and one av_log header);
 * flagged inline — confirm against upstream ffmpeg.c.
 *
 * @param is_last_report nonzero for the final report after transcoding
 * @param timer_start    wall-clock start time in microseconds
 * @param cur_time       current wall-clock time in microseconds
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* throttle intermediate reports to one every 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): initialization of 'buf' lost in extraction */
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* only the first video stream contributes frame/fps; later ones
         * just append their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4; /* chroma planes are subsampled */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): a guard condition before this statement was lost in extraction */
        pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                      ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): a guard condition before this statement was lost in extraction */
    av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): av_log call header lost in extraction */
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1845 
/* Seed an input filter's media parameters from the stream's codec
 * parameters, used when no decoded frame ever arrived.
 * NOTE(review): the signature line was lost in this extraction; the body
 * reads an InputFilter 'ifilter' and AVCodecParameters 'par'. */
{
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1858 
/**
 * Drain all encoders at end of stream: send a NULL (flush) frame to each
 * encoder that needs encoding and mux every remaining packet.
 *
 * NOTE(review): several source lines were lost in this extraction (two
 * av_log/guard headers, the condition before 'continue', and one line
 * before the receive loop); flagged inline — confirm against upstream
 * ffmpeg.c before building.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            /* NOTE(review): av_log call header lost in extraction */
            "Finishing stream %d:%d without any data written to it.\n",
            ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* give every unconfigured input fallback parameters so
                 * the graph can still be configured */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): guard condition lost in extraction */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            /* NOTE(review): a statement was lost here in extraction */
        }

        /* NOTE(review): guard condition lost in extraction (only
         * audio/video encoders are flushed below) */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            /* NOTE(review): a statement was lost here in extraction */

            /* keep sending the flush frame until the encoder stops
             * returning EAGAIN */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* two-pass log output */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            /* NOTE(review): guard condition lost in extraction */
                do_video_stats(ost, pkt_size);
        }
    }
}
1965 
/*
 * Check whether a packet from ist should be written into ost at this time.
 * NOTE(review): the signature line was lost in this extraction; the body
 * reads an InputStream 'ist' and OutputStream 'ost' and returns 0 (skip)
 * or 1 (write).
 */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* the packet must come from this output stream's mapped input */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* skip packets before the output file's -ss start time */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1985 
/**
 * Copy a packet from an input stream to an output stream (-c copy path),
 * rescaling timestamps into the muxer timebase and honouring start/stop
 * times. A NULL pkt flushes the output bitstream filters.
 *
 * NOTE(review): a few source lines were lost in this extraction (part of
 * the initial keyframe test and of the audio dts computation); flagged
 * inline — confirm against upstream ffmpeg.c before building.
 *
 * @param ist source input stream
 * @param ost destination output stream
 * @param pkt packet to copy, or NULL at EOF
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt;

    // EOF: flush output bitstream filters.
    if (!pkt) {
        av_init_packet(&opkt);
        opkt.data = NULL;
        opkt.size = 0;
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* drop leading non-keyframes */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        /* NOTE(review): second operand of this condition lost in extraction */
        return;

    /* drop packets before the (possibly copy_ts-adjusted) start time */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* stop when the output file's -t recording time is reached */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* stop when the input file's recording time is reached */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (av_packet_ref(&opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* NOTE(review): declaration/initialization of 'duration' lost in extraction */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                    (AVRational){1, ist->dec_ctx->sample_rate}, duration,
        /* NOTE(review): trailing arguments of av_rescale_delta lost in extraction */
        /* dts will be set immediately afterwards to what pts is now */
        opkt.pts = opkt.dts - ost_tb_start_time;
    } else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    output_packet(of, &opkt, ost, 0);
}
2062 
/* Guess a channel layout for an input stream whose decoder reports none,
 * logging the guess at WARNING level.
 * NOTE(review): the signature line was lost in this extraction (the body
 * reads an InputStream 'ist' and returns 0 on failure, 1 on success), and
 * the statement that actually assigns the guessed layout is missing below
 * — confirm against upstream ffmpeg.c. */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* -guess_layout_max caps how many channels we are willing to guess for */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): default-layout assignment lost in extraction */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2082 
/**
 * Update decode error statistics after a decode attempt and enforce
 * -xerror: abort on a decode error, and (apparently) on a corrupt decoded
 * frame.
 *
 * NOTE(review): two source lines were lost in this extraction — the
 * condition opening the inner block and the av_log call header — leaving
 * an orphan string literal below; confirm against upstream ffmpeg.c.
 *
 * @param ist        input stream the result belongs to (may be NULL)
 * @param got_output whether the decoder produced a frame
 * @param ret        return code of the decode call
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
        /* NOTE(review): inner condition and av_log header lost in extraction */
            "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
            if (exit_on_error)
                exit_program(1);
        }
    }
}
2100 
// Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): the signature line was lost in this extraction; the body
 * reads a FilterGraph 'fg' and returns 1 when every audio/video input has
 * a known format, 0 otherwise. */
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2112 
/* Feed a decoded frame into an input filter, (re)configuring the
 * filtergraph when the frame's parameters differ from what the filter was
 * configured for; frames arriving before all inputs have known formats
 * are buffered in the filter's FIFO.
 * NOTE(review): the signature line was lost in this extraction (the body
 * reads an InputFilter 'ifilter' and an AVFrame 'frame' and returns an
 * int error code), as was the buffersrc call that assigns 'ret' near the
 * end — confirm against upstream ffmpeg.c. */
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* -reinit_filter 0 disables reconfiguration once a graph exists */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* hardware frames context appearing/disappearing/changing also forces
     * a reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* not all inputs known yet: queue a clone of the frame,
                 * growing the FIFO if necessary */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* drain the old graph before tearing it down */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the buffersrc call assigning 'ret' was lost in extraction */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2189 
2190 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2191 {
2192  int ret;
2193 
2194  ifilter->eof = 1;
2195 
2196  if (ifilter->filter) {
2197  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2198  if (ret < 0)
2199  return ret;
2200  } else {
2201  // the filtergraph was never configured
2202  if (ifilter->format < 0)
2203  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2204  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2205  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2206  return AVERROR_INVALIDDATA;
2207  }
2208  }
2209 
2210  return 0;
2211 }
2212 
2213 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2214 // There is the following difference: if you got a frame, you must call
2215 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2216 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2217 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2218 {
2219  int ret;
2220 
2221  *got_frame = 0;
2222 
2223  if (pkt) {
2224  ret = avcodec_send_packet(avctx, pkt);
2225  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2226  // decoded frames with avcodec_receive_frame() until done.
2227  if (ret < 0 && ret != AVERROR_EOF)
2228  return ret;
2229  }
2230 
2231  ret = avcodec_receive_frame(avctx, frame);
2232  if (ret < 0 && ret != AVERROR(EAGAIN))
2233  return ret;
2234  if (ret >= 0)
2235  *got_frame = 1;
2236 
2237  return 0;
2238 }
2239 
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter receive a reference (av_frame_ref into
 * ist->filter_frame); the last gets the decoded frame itself. AVERROR_EOF
 * from a filter is deliberately ignored. Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line (2240) is missing from this listing —
 * presumably `static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)`. */
2241 {
2242  int i, ret;
2243  AVFrame *f;
2244 
2245  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2246  for (i = 0; i < ist->nb_filters; i++) {
2247  if (i < ist->nb_filters - 1) {
2248  f = ist->filter_frame;
2249  ret = av_frame_ref(f, decoded_frame);
2250  if (ret < 0)
2251  break;
2252  } else
2253  f = decoded_frame;
2254  ret = ifilter_send_frame(ist->filters[i], f);
2255  if (ret == AVERROR_EOF)
2256  ret = 0; /* ignore */
2257  if (ret < 0) {
/* NOTE(review): line 2258 is missing from this listing — presumably the
 * `av_log(NULL, AV_LOG_ERROR,` head of the message below. */
2259  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2260  break;
2261  }
2262  }
2263  return ret;
2264 }
2265 
/* Decode one audio packet for an input stream, fix up its timestamps, and
 * forward the decoded frame to the attached filtergraphs. *got_output is set
 * when a frame was produced; *decode_failed distinguishes decoder errors
 * from downstream (filtering) errors.
 * NOTE(review): the signature head (line 2266) is missing from this listing —
 * presumably `static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,`. */
2267  int *decode_failed)
2268 {
/* NOTE(review): line 2269 is missing — presumably `AVFrame *decoded_frame;`. */
2270  AVCodecContext *avctx = ist->dec_ctx;
2271  int ret, err = 0;
2272  AVRational decoded_frame_tb;
2273 
/* lazily allocate the per-stream scratch frames */
2274  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2275  return AVERROR(ENOMEM);
2276  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2277  return AVERROR(ENOMEM);
2278  decoded_frame = ist->decoded_frame;
2279 
/* NOTE(review): line 2280 is missing — presumably `update_benchmark(NULL);`. */
2281  ret = decode(avctx, decoded_frame, got_output, pkt);
2282  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2283  if (ret < 0)
2284  *decode_failed = 1;
2285 
2286  if (ret >= 0 && avctx->sample_rate <= 0) {
2287  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2288  ret = AVERROR_INVALIDDATA;
2289  }
2290 
2291  if (ret != AVERROR_EOF)
2292  check_decode_result(ist, got_output, ret);
2293 
2294  if (!*got_output || ret < 0)
2295  return ret;
2296 
2297  ist->samples_decoded += decoded_frame->nb_samples;
2298  ist->frames_decoded++;
2299 
2300  /* increment next_dts to use for the case where the input stream does not
2301  have timestamps or there are multiple frames in the packet */
2302  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2303  avctx->sample_rate;
2304  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2305  avctx->sample_rate;
2306 
/* choose a pts and its time base: frame pts, else packet pts, else the
 * stream's running dts in AV_TIME_BASE units */
2307  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2308  decoded_frame_tb = ist->st->time_base;
2309  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2310  decoded_frame->pts = pkt->pts;
2311  decoded_frame_tb = ist->st->time_base;
2312  }else {
2313  decoded_frame->pts = ist->dts;
2314  decoded_frame_tb = AV_TIME_BASE_Q;
2315  }
2316  if (decoded_frame->pts != AV_NOPTS_VALUE)
2317  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2318  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2319  (AVRational){1, avctx->sample_rate});
2320  ist->nb_samples = decoded_frame->nb_samples;
2321  err = send_frame_to_filters(ist, decoded_frame);
2322 
/* NOTE(review): line 2323 is missing — presumably
 * `av_frame_unref(ist->filter_frame);`; confirm against upstream ffmpeg.c. */
2324  av_frame_unref(decoded_frame);
2325  return err < 0 ? err : ret;
2326 }
2327 
/* Decode one video packet, derive a best-effort timestamp for the frame,
 * and forward it to the attached filtergraphs. *duration_pts receives the
 * frame duration in stream time base; eof selects drain mode. */
2328 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2329  int *decode_failed)
2330 {
/* NOTE(review): line 2331 is missing — presumably `AVFrame *decoded_frame;`. */
2332  int i, ret = 0, err = 0;
2333  int64_t best_effort_timestamp;
2334  int64_t dts = AV_NOPTS_VALUE;
2335  AVPacket avpkt;
2336 
2337  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2338  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2339  // skip the packet.
2340  if (!eof && pkt && pkt->size == 0)
2341  return 0;
2342 
2343  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2344  return AVERROR(ENOMEM);
2345  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2346  return AVERROR(ENOMEM);
2347  decoded_frame = ist->decoded_frame;
2348  if (ist->dts != AV_NOPTS_VALUE)
2349  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2350  if (pkt) {
2351  avpkt = *pkt;
2352  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2353  }
2354 
2355  // The old code used to set dts on the drain packet, which does not work
2356  // with the new API anymore.
2357  if (eof) {
/* remember the dts of each drain call so frames emitted during draining can
 * still get a timestamp (consumed below once best_effort_timestamp is unset) */
2358  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2359  if (!new)
2360  return AVERROR(ENOMEM);
2361  ist->dts_buffer = new;
2362  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2363  }
2364 
/* NOTE(review): line 2365 is missing — presumably `update_benchmark(NULL);`. */
2366  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2367  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2368  if (ret < 0)
2369  *decode_failed = 1;
2370 
2371  // The following line may be required in some cases where there is no parser
2372  // or the parser does not has_b_frames correctly
2373  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2374  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2375  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2376  } else
/* NOTE(review): line 2377 is missing — presumably the
 * `av_log(ist->dec_ctx, AV_LOG_WARNING,` head of this message. */
2378  "video_delay is larger in decoder than demuxer %d > %d.\n"
2379  "If you want to help, upload a sample "
2380  "of this file to https://streams.videolan.org/upload/ "
2381  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2382  ist->dec_ctx->has_b_frames,
2383  ist->st->codecpar->video_delay);
2384  }
2385 
2386  if (ret != AVERROR_EOF)
2387  check_decode_result(ist, got_output, ret);
2388 
2389  if (*got_output && ret >= 0) {
2390  if (ist->dec_ctx->width != decoded_frame->width ||
2391  ist->dec_ctx->height != decoded_frame->height ||
2392  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2393  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2394  decoded_frame->width,
2395  decoded_frame->height,
2396  decoded_frame->format,
2397  ist->dec_ctx->width,
2398  ist->dec_ctx->height,
2399  ist->dec_ctx->pix_fmt);
2400  }
2401  }
2402 
2403  if (!*got_output || ret < 0)
2404  return ret;
2405 
2406  if(ist->top_field_first>=0)
2407  decoded_frame->top_field_first = ist->top_field_first;
2408 
2409  ist->frames_decoded++;
2410 
/* download the frame from hw memory when a hwaccel produced it */
2411  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2412  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2413  if (err < 0)
2414  goto fail;
2415  }
2416  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2417 
2418  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2419  *duration_pts = decoded_frame->pkt_duration;
2420 
/* with a forced input framerate, timestamps are simply sequential */
2421  if (ist->framerate.num)
2422  best_effort_timestamp = ist->cfr_next_pts++;
2423 
2424  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2425  best_effort_timestamp = ist->dts_buffer[0];
2426 
2427  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2428  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2429  ist->nb_dts_buffer--;
2430  }
2431 
2432  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2433  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2434 
2435  if (ts != AV_NOPTS_VALUE)
2436  ist->next_pts = ist->pts = ts;
2437  }
2438 
2439  if (debug_ts) {
2440  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2441  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2442  ist->st->index, av_ts2str(decoded_frame->pts),
2443  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2444  best_effort_timestamp,
2445  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2446  decoded_frame->key_frame, decoded_frame->pict_type,
2447  ist->st->time_base.num, ist->st->time_base.den);
2448  }
2449 
2450  if (ist->st->sample_aspect_ratio.num)
2451  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2452 
2453  err = send_frame_to_filters(ist, decoded_frame);
2454 
2455 fail:
/* NOTE(review): line 2456 is missing — presumably
 * `av_frame_unref(ist->filter_frame);`; confirm against upstream ffmpeg.c. */
2457  av_frame_unref(decoded_frame);
2458  return err < 0 ? err : ret;
2459 }
2460 
/* Decode one subtitle packet, optionally fix up its duration against the
 * previous subtitle (-fix_sub_duration), feed it to sub2video filters or
 * queue it, and encode it to every matching subtitle output stream.
 * NOTE(review): the signature head (line 2461) is missing from this listing —
 * presumably `static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,`. */
2462  int *decode_failed)
2463 {
/* NOTE(review): line 2464 is missing — presumably `AVSubtitle subtitle;`. */
2465  int free_sub = 1;
2466  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2467  &subtitle, got_output, pkt);
2468 
2469  check_decode_result(NULL, got_output, ret);
2470 
2471  if (ret < 0 || !*got_output) {
2472  *decode_failed = 1;
2473  if (!pkt->size)
2474  sub2video_flush(ist);
2475  return ret;
2476  }
2477 
2478  if (ist->fix_sub_duration) {
/* clamp the previous subtitle's display time so it ends when this one starts */
2479  int end = 1;
2480  if (ist->prev_sub.got_output) {
2481  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2482  1000, AV_TIME_BASE);
2483  if (end < ist->prev_sub.subtitle.end_display_time) {
2484  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2485  "Subtitle duration reduced from %"PRId32" to %d%s\n",
/* NOTE(review): line 2486 is missing — presumably the arguments
 * `ist->prev_sub.subtitle.end_display_time, end,`. */
2487  end <= 0 ? ", dropping it" : "");
/* NOTE(review): line 2488 is missing — presumably
 * `ist->prev_sub.subtitle.end_display_time = end;`. */
2489  }
2490  }
/* swap current and previous: processing below acts on the previous subtitle */
2491  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2492  FFSWAP(int, ret, ist->prev_sub.ret);
2493  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2494  if (end <= 0)
2495  goto out;
2496  }
2497 
2498  if (!*got_output)
2499  return ret;
2500 
2501  if (ist->sub2video.frame) {
2502  sub2video_update(ist, INT64_MIN, &subtitle);
2503  } else if (ist->nb_filters) {
2504  if (!ist->sub2video.sub_queue)
2505  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2506  if (!ist->sub2video.sub_queue)
2507  exit_program(1);
2508  if (!av_fifo_space(ist->sub2video.sub_queue)) {
/* NOTE(review): line 2509 is missing — presumably the
 * `ret = av_fifo_realloc2(...)` growing the queue; confirm upstream. */
2510  if (ret < 0)
2511  exit_program(1);
2512  }
2513  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2514  free_sub = 0;
2515  }
2516 
2517  if (!subtitle.num_rects)
2518  goto out;
2519 
2520  ist->frames_decoded++;
2521 
2522  for (i = 0; i < nb_output_streams; i++) {
2523  OutputStream *ost = output_streams[i];
2524 
2525  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2526  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2527  continue;
2528 
2529  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2530  }
2531 
2532 out:
2533  if (free_sub)
2534  avsubtitle_free(&subtitle);
2535  return ret;
2536 }
2537 
/* Send EOF (at the stream's current pts, converted to stream time base) to
 * every filtergraph input attached to this input stream. Returns 0 or the
 * first negative AVERROR from ifilter_send_eof().
 * NOTE(review): the signature line (2538) is missing from this listing —
 * presumably `static int send_filter_eof(InputStream *ist)`. */
2539 {
2540  int i, ret;
2541  /* TODO keep pts also in stream time base to avoid converting back */
2542  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
/* NOTE(review): line 2543 is missing — presumably the rounding flags
 * `AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);`. */
2544 
2545  for (i = 0; i < ist->nb_filters; i++) {
2546  ret = ifilter_send_eof(ist->filters[i], pts);
2547  if (ret < 0)
2548  return ret;
2549  }
2550  return 0;
2551 }
2552 
2553 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one input packet (or flush with pkt == NULL): maintain the
 * stream's dts/pts clocks, run the decode loop for decoded streams, send
 * EOF to filters after draining, and handle stream copy bookkeeping plus the
 * actual streamcopy to matching outputs. Returns 0 once EOF was reached,
 * non-zero otherwise. no_eof suppresses the filter EOF (used for -stream_loop). */
2554 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2555 {
2556  int ret = 0, i;
2557  int repeating = 0;
2558  int eof_reached = 0;
2559 
2560  AVPacket avpkt;
/* initialize the stream clocks from the first packet seen */
2561  if (!ist->saw_first_ts) {
2562  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2563  ist->pts = 0;
2564  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2565  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2566  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2567  }
2568  ist->saw_first_ts = 1;
2569  }
2570 
2571  if (ist->next_dts == AV_NOPTS_VALUE)
2572  ist->next_dts = ist->dts;
2573  if (ist->next_pts == AV_NOPTS_VALUE)
2574  ist->next_pts = ist->pts;
2575 
2576  if (!pkt) {
2577  /* EOF handling */
2578  av_init_packet(&avpkt);
2579  avpkt.data = NULL;
2580  avpkt.size = 0;
2581  } else {
2582  avpkt = *pkt;
2583  }
2584 
2585  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2586  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2587  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2588  ist->next_pts = ist->pts = ist->dts;
2589  }
2590 
2591  // while we have more to decode or while the decoder did output something on EOF
2592  while (ist->decoding_needed) {
2593  int64_t duration_dts = 0;
2594  int64_t duration_pts = 0;
2595  int got_output = 0;
2596  int decode_failed = 0;
2597 
2598  ist->pts = ist->next_pts;
2599  ist->dts = ist->next_dts;
2600 
2601  switch (ist->dec_ctx->codec_type) {
2602  case AVMEDIA_TYPE_AUDIO:
2603  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2604  &decode_failed);
2605  break;
2606  case AVMEDIA_TYPE_VIDEO:
2607  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2608  &decode_failed);
2609  if (!repeating || !pkt || got_output) {
2610  if (pkt && pkt->duration) {
2611  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2612  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
/* NOTE(review): line 2613 is missing — presumably the `int ticks = ...`
 * declaration (parser repeat_pict or ticks_per_frame); confirm upstream. */
2614  duration_dts = ((int64_t)AV_TIME_BASE *
2615  ist->dec_ctx->framerate.den * ticks) /
/* NOTE(review): line 2616 is missing — presumably the divisor
 * `(ist->dec_ctx->framerate.num * ist->dec_ctx->ticks_per_frame);`. */
2617  }
2618 
2619  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2620  ist->next_dts += duration_dts;
2621  }else
2622  ist->next_dts = AV_NOPTS_VALUE;
2623  }
2624 
2625  if (got_output) {
2626  if (duration_pts > 0) {
2627  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2628  } else {
2629  ist->next_pts += duration_dts;
2630  }
2631  }
2632  break;
2633  case AVMEDIA_TYPE_SUBTITLE:
2634  if (repeating)
2635  break;
2636  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2637  if (!pkt && ret >= 0)
2638  ret = AVERROR_EOF;
2639  break;
2640  default:
2641  return -1;
2642  }
2643 
2644  if (ret == AVERROR_EOF) {
2645  eof_reached = 1;
2646  break;
2647  }
2648 
2649  if (ret < 0) {
2650  if (decode_failed) {
2651  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2652  ist->file_index, ist->st->index, av_err2str(ret));
2653  } else {
2654  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2655  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2656  }
2657  if (!decode_failed || exit_on_error)
2658  exit_program(1);
2659  break;
2660  }
2661 
2662  if (got_output)
2663  ist->got_output = 1;
2664 
2665  if (!got_output)
2666  break;
2667 
2668  // During draining, we might get multiple output frames in this loop.
2669  // ffmpeg.c does not drain the filter chain on configuration changes,
2670  // which means if we send multiple frames at once to the filters, and
2671  // one of those frames changes configuration, the buffered frames will
2672  // be lost. This can upset certain FATE tests.
2673  // Decode only 1 frame per call on EOF to appease these FATE tests.
2674  // The ideal solution would be to rewrite decoding to use the new
2675  // decoding API in a better way.
2676  if (!pkt)
2677  break;
2678 
2679  repeating = 1;
2680  }
2681 
2682  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2683  /* except when looping we need to flush but not to send an EOF */
2684  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2685  int ret = send_filter_eof(ist);
2686  if (ret < 0) {
2687  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2688  exit_program(1);
2689  }
2690  }
2691 
2692  /* handle stream copy */
2693  if (!ist->decoding_needed && pkt) {
2694  ist->dts = ist->next_dts;
2695  switch (ist->dec_ctx->codec_type) {
2696  case AVMEDIA_TYPE_AUDIO:
2697  av_assert1(pkt->duration >= 0);
2698  if (ist->dec_ctx->sample_rate) {
2699  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2700  ist->dec_ctx->sample_rate;
2701  } else {
2702  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2703  }
2704  break;
2705  case AVMEDIA_TYPE_VIDEO:
2706  if (ist->framerate.num) {
2707  // TODO: Remove work-around for c99-to-c89 issue 7
2708  AVRational time_base_q = AV_TIME_BASE_Q;
2709  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2710  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2711  } else if (pkt->duration) {
2712  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2713  } else if(ist->dec_ctx->framerate.num != 0) {
2714  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2715  ist->next_dts += ((int64_t)AV_TIME_BASE *
2716  ist->dec_ctx->framerate.den * ticks) /
/* NOTE(review): line 2717 is missing — presumably the divisor
 * `(ist->dec_ctx->framerate.num * ist->dec_ctx->ticks_per_frame);`. */
2718  }
2719  break;
2720  }
2721  ist->pts = ist->dts;
2722  ist->next_pts = ist->next_dts;
2723  }
2724  for (i = 0; i < nb_output_streams; i++) {
2725  OutputStream *ost = output_streams[i];
2726 
2727  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2728  continue;
2729 
2730  do_streamcopy(ist, ost, pkt);
2731  }
2732 
2733  return !eof_reached;
2734 }
2735 
/* Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to -sdp_file. Does nothing until every
 * output file's header has been written. */
2736 static void print_sdp(void)
2737 {
2738  char sdp[16384];
2739  int i;
2740  int j;
2741  AVIOContext *sdp_pb;
2742  AVFormatContext **avc;
2743 
2744  for (i = 0; i < nb_output_files; i++) {
2745  if (!output_files[i]->header_written)
2746  return;
2747  }
2748 
2749  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2750  if (!avc)
2751  exit_program(1);
/* collect only the RTP muxers; j counts them */
2752  for (i = 0, j = 0; i < nb_output_files; i++) {
2753  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2754  avc[j] = output_files[i]->ctx;
2755  j++;
2756  }
2757  }
2758 
2759  if (!j)
2760  goto fail;
2761 
2762  av_sdp_create(avc, j, sdp, sizeof(sdp));
2763 
2764  if (!sdp_filename) {
2765  printf("SDP:\n%s\n", sdp);
2766  fflush(stdout);
2767  } else {
2768  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2769  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2770  } else {
2771  avio_print(sdp_pb, sdp);
2772  avio_closep(&sdp_pb);
/* NOTE(review): line 2773 is missing — presumably
 * `av_freep(&sdp_filename);`; confirm against upstream ffmpeg.c. */
2774  }
2775  }
2776 
2777 fail:
2778  av_freep(&avc);
2779 }
2780 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats, return the first software format, or initialize the requested /
 * auto-detected hwaccel and return its hardware format. Returns
 * AV_PIX_FMT_NONE when a required hwaccel cannot be initialized.
 * NOTE(review): the signature line (2781) is missing from this listing —
 * presumably `static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)`. */
2782 {
2783  InputStream *ist = s->opaque;
2784  const enum AVPixelFormat *p;
2785  int ret;
2786 
2787  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
/* NOTE(review): line 2788 is missing — presumably
 * `const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);`. */
2789  const AVCodecHWConfig *config = NULL;
2790  int i;
2791 
/* first non-hwaccel format wins: software decoding */
2792  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2793  break;
2794 
2795  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2796  ist->hwaccel_id == HWACCEL_AUTO) {
2797  for (i = 0;; i++) {
2798  config = avcodec_get_hw_config(s->codec, i);
2799  if (!config)
2800  break;
2801  if (!(config->methods &
/* NOTE(review): line 2802 is missing — presumably
 * `AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))`. */
2803  continue;
2804  if (config->pix_fmt == *p)
2805  break;
2806  }
2807  }
2808  if (config) {
2809  if (config->device_type != ist->hwaccel_device_type) {
2810  // Different hwaccel offered, ignore.
2811  continue;
2812  }
2813 
2814  ret = hwaccel_decode_init(s);
2815  if (ret < 0) {
2816  if (ist->hwaccel_id == HWACCEL_GENERIC) {
/* NOTE(review): line 2817 is missing — presumably the
 * `av_log(NULL, AV_LOG_FATAL,` head of this message. */
2818  "%s hwaccel requested for input stream #%d:%d, "
2819  "but cannot be initialized.\n",
/* NOTE(review): line 2820 is missing — presumably the first format
 * argument (the hwaccel/device type name); confirm upstream. */
2821  ist->file_index, ist->st->index);
2822  return AV_PIX_FMT_NONE;
2823  }
2824  continue;
2825  }
2826  } else {
/* fall back to the legacy hwaccels[] table keyed by pixel format */
2827  const HWAccel *hwaccel = NULL;
2828  int i;
2829  for (i = 0; hwaccels[i].name; i++) {
2830  if (hwaccels[i].pix_fmt == *p) {
2831  hwaccel = &hwaccels[i];
2832  break;
2833  }
2834  }
2835  if (!hwaccel) {
2836  // No hwaccel supporting this pixfmt.
2837  continue;
2838  }
2839  if (hwaccel->id != ist->hwaccel_id) {
2840  // Does not match requested hwaccel.
2841  continue;
2842  }
2843 
2844  ret = hwaccel->init(s);
2845  if (ret < 0) {
/* NOTE(review): line 2846 is missing — presumably the
 * `av_log(NULL, AV_LOG_FATAL,` head of this message. */
2847  "%s hwaccel requested for input stream #%d:%d, "
2848  "but cannot be initialized.\n", hwaccel->name,
2849  ist->file_index, ist->st->index);
2850  return AV_PIX_FMT_NONE;
2851  }
2852  }
2853 
2854  if (ist->hw_frames_ctx) {
/* NOTE(review): line 2855 is missing — presumably
 * `s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);`. */
2856  if (!s->hw_frames_ctx)
2857  return AV_PIX_FMT_NONE;
2858  }
2859 
2860  ist->hwaccel_pix_fmt = *p;
2861  break;
2862  }
2863 
2864  return *p;
2865 }
2866 
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hardware frames, otherwise use the default allocator.
 * NOTE(review): the signature line (2867) is missing from this listing —
 * presumably `static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)`. */
2868 {
2869  InputStream *ist = s->opaque;
2870 
2871  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2872  return ist->hwaccel_get_buffer(s, frame, flags);
2873 
2874  return avcodec_default_get_buffer2(s, frame, flags);
2875 }
2876 
/* Open the decoder for one input stream (when decoding is needed): install
 * the get_format/get_buffer callbacks, set decoder options, set up hw
 * devices, and call avcodec_open2(). On failure, writes a human-readable
 * message into `error` and returns a negative AVERROR. */
2877 static int init_input_stream(int ist_index, char *error, int error_len)
2878 {
2879  int ret;
2880  InputStream *ist = input_streams[ist_index];
2881 
2882  if (ist->decoding_needed) {
2883  AVCodec *codec = ist->dec;
2884  if (!codec) {
2885  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2886  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2887  return AVERROR(EINVAL);
2888  }
2889 
2890  ist->dec_ctx->opaque = ist;
2891  ist->dec_ctx->get_format = get_format;
2892  ist->dec_ctx->get_buffer2 = get_buffer;
2893  ist->dec_ctx->thread_safe_callbacks = 1;
2894 
2895  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2896  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2897  (ist->decoding_needed & DECODING_FOR_OST)) {
2898  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
/* NOTE(review): line 2899 is missing — presumably the
 * `if (ist->decoding_needed & DECODING_FOR_FILTER)` guarding the warning. */
2900  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2901  }
2902 
2903  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2904 
2905  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2906  * audio, and video decoders such as cuvid or mediacodec */
2907  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2908 
2909  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2910  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2911  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
/* NOTE(review): line 2912 is missing — presumably
 * `if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)`. */
2913  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2914 
2915  ret = hw_device_setup_for_decode(ist);
2916  if (ret < 0) {
2917  snprintf(error, error_len, "Device setup failed for "
2918  "decoder on input stream #%d:%d : %s",
2919  ist->file_index, ist->st->index, av_err2str(ret));
2920  return ret;
2921  }
2922 
2923  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2924  if (ret == AVERROR_EXPERIMENTAL)
2925  abort_codec_experimental(codec, 0);
2926 
2927  snprintf(error, error_len,
2928  "Error while opening decoder for input stream "
2929  "#%d:%d : %s",
2930  ist->file_index, ist->st->index, av_err2str(ret));
2931  return ret;
2932  }
/* NOTE(review): line 2933 is missing — presumably
 * `assert_avoptions(ist->decoder_opts);`; confirm upstream. */
2934  }
2935 
2936  ist->next_pts = AV_NOPTS_VALUE;
2937  ist->next_dts = AV_NOPTS_VALUE;
2938 
2939  return 0;
2940 }
2941 
/* Return the input stream feeding this output stream (stream copy / direct
 * mapping), or NULL when the output is fed by a filtergraph instead.
 * NOTE(review): the signature line (2942) is missing from this listing —
 * presumably `static InputStream *get_input_stream(OutputStream *ost)`. */
2943 {
2944  if (ost->source_index >= 0)
2945  return input_streams[ost->source_index];
2946  return NULL;
2947 }
2948 
/* qsort() comparator for int64_t values: negative, zero or positive as
 * *a is less than, equal to, or greater than *b. The two-comparison form
 * avoids overflow that plain subtraction would risk. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2953 
2954 /* open the muxer when all the streams are initialized */
/* Write the output file header once every stream of the file is initialized,
 * print the SDP if requested, then flush packets buffered in the per-stream
 * muxing queues. Returns 0 (also when still waiting on streams) or a
 * negative AVERROR from avformat_write_header().
 * NOTE(review): the signature line (2955) is missing from this listing —
 * presumably `static int check_init_output_file(OutputFile *of, int file_index)`. */
2956 {
2957  int ret, i;
2958 
/* bail out until every stream of this file is ready */
2959  for (i = 0; i < of->ctx->nb_streams; i++) {
2960  OutputStream *ost = output_streams[of->ost_index + i];
2961  if (!ost->initialized)
2962  return 0;
2963  }
2964 
2965  of->ctx->interrupt_callback = int_cb;
2966 
2967  ret = avformat_write_header(of->ctx, &of->opts);
2968  if (ret < 0) {
/* NOTE(review): line 2969 is missing — presumably the
 * `av_log(NULL, AV_LOG_ERROR,` head of this message. */
2970  "Could not write header for output file #%d "
2971  "(incorrect codec parameters ?): %s\n",
2972  file_index, av_err2str(ret));
2973  return ret;
2974  }
2975  //assert_avoptions(of->opts);
2976  of->header_written = 1;
2977 
2978  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2979 
2980  if (sdp_filename || want_sdp)
2981  print_sdp();
2982 
2983  /* flush the muxing queues */
2984  for (i = 0; i < of->ctx->nb_streams; i++) {
2985  OutputStream *ost = output_streams[of->ost_index + i];
2986 
2987  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2988  if (!av_fifo_size(ost->muxing_queue))
2989  ost->mux_timebase = ost->st->time_base;
2990 
2991  while (av_fifo_size(ost->muxing_queue)) {
2992  AVPacket pkt;
2993  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2994  write_packet(of, &pkt, ost, 1);
2995  }
2996  }
2997 
2998  return 0;
2999 }
3000 
/* Initialize the bitstream filter chain of an output stream (if any): feed
 * it the stream's codec parameters and time base, then copy the filter's
 * output parameters back to the stream. Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line (3001) is missing from this listing —
 * presumably `static int init_output_bsfs(OutputStream *ost)`. */
3002 {
3003  AVBSFContext *ctx = ost->bsf_ctx;
3004  int ret;
3005 
3006  if (!ctx)
3007  return 0;
3008 
3009  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3010  if (ret < 0)
3011  return ret;
3012 
3013  ctx->time_base_in = ost->st->time_base;
3014 
3015  ret = av_bsf_init(ctx);
3016  if (ret < 0) {
3017  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3018  ctx->filter->name);
3019  return ret;
3020  }
3021 
/* the bsf may have rewritten extradata/codec parameters and the time base */
3022  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3023  if (ret < 0)
3024  return ret;
3025  ost->st->time_base = ctx->time_base_out;
3026 
3027  return 0;
3028 }
3029 
3031 {
3032  OutputFile *of = output_files[ost->file_index];
3033  InputStream *ist = get_input_stream(ost);
3034  AVCodecParameters *par_dst = ost->st->codecpar;
3035  AVCodecParameters *par_src = ost->ref_par;
3036  AVRational sar;
3037  int i, ret;
3038  uint32_t codec_tag = par_dst->codec_tag;
3039 
3040  av_assert0(ist && !ost->filter);
3041 
3042  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3043  if (ret >= 0)
3044  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3045  if (ret < 0) {
3047  "Error setting up codec context options.\n");
3048  return ret;
3049  }
3050 
3051  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3052  if (ret < 0) {
3054  "Error getting reference codec parameters.\n");
3055  return ret;
3056  }
3057 
3058  if (!codec_tag) {
3059  unsigned int codec_tag_tmp;
3060  if (!of->ctx->oformat->codec_tag ||
3061  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3062  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3063  codec_tag = par_src->codec_tag;
3064  }
3065 
3066  ret = avcodec_parameters_copy(par_dst, par_src);
3067  if (ret < 0)
3068  return ret;
3069 
3070  par_dst->codec_tag = codec_tag;
3071 
3072  if (!ost->frame_rate.num)
3073  ost->frame_rate = ist->framerate;
3074  ost->st->avg_frame_rate = ost->frame_rate;
3075 
3077  if (ret < 0)
3078  return ret;
3079 
3080  // copy timebase while removing common factors
3081  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3083 
3084  // copy estimated duration as a hint to the muxer
3085  if (ost->st->duration <= 0 && ist->st->duration > 0)
3086  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3087 
3088  // copy disposition
3089  ost->st->disposition = ist->st->disposition;
3090 
3091  if (ist->st->nb_side_data) {
3092  for (i = 0; i < ist->st->nb_side_data; i++) {
3093  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3094  uint8_t *dst_data;
3095 
3096  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3097  if (!dst_data)
3098  return AVERROR(ENOMEM);
3099  memcpy(dst_data, sd_src->data, sd_src->size);
3100  }
3101  }
3102 
3103  if (ost->rotate_overridden) {
3105  sizeof(int32_t) * 9);
3106  if (sd)
3108  }
3109 
3110  switch (par_dst->codec_type) {
3111  case AVMEDIA_TYPE_AUDIO:
3112  if (audio_volume != 256) {
3113  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3114  exit_program(1);
3115  }
3116  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3117  par_dst->block_align= 0;
3118  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3119  par_dst->block_align= 0;
3120  break;
3121  case AVMEDIA_TYPE_VIDEO:
3122  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3123  sar =
3125  (AVRational){ par_dst->height, par_dst->width });
3126  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3127  "with stream copy may produce invalid files\n");
3128  }
3129  else if (ist->st->sample_aspect_ratio.num)
3130  sar = ist->st->sample_aspect_ratio;
3131  else
3132  sar = par_src->sample_aspect_ratio;
3133  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3134  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3135  ost->st->r_frame_rate = ist->st->r_frame_rate;
3136  break;
3137  }
3138 
3139  ost->mux_timebase = ist->st->time_base;
3140 
3141  return 0;
3142 }
3143 
3145 {
3146  AVDictionaryEntry *e;
3147 
3148  uint8_t *encoder_string;
3149  int encoder_string_len;
3150  int format_flags = 0;
3151  int codec_flags = ost->enc_ctx->flags;
3152 
3153  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3154  return;
3155 
3156  e = av_dict_get(of->opts, "fflags", NULL, 0);
3157  if (e) {
3158  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3159  if (!o)
3160  return;
3161  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3162  }
3163  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3164  if (e) {
3165  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3166  if (!o)
3167  return;
3168  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3169  }
3170 
3171  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3172  encoder_string = av_mallocz(encoder_string_len);
3173  if (!encoder_string)
3174  exit_program(1);
3175 
3176  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3177  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3178  else
3179  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3180  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3181  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3183 }
3184 
/* Parse the -force_key_frames value: a comma-separated list of timestamps
 * (or "chapters[+offset]" to key-frame at chapter starts). Fills
 * ost->forced_kf_pts with sorted pts in the encoder time base and sets
 * ost->forced_kf_count. Exits the program on allocation failure. */
3185 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3186  AVCodecContext *avctx)
3187 {
3188  char *p;
3189  int n = 1, i, size, index = 0;
3190  int64_t t, *pts;
3191 
/* count entries: one per comma plus one */
3192  for (p = kf; *p; p++)
3193  if (*p == ',')
3194  n++;
3195  size = n;
3196  pts = av_malloc_array(size, sizeof(*pts));
3197  if (!pts) {
3198  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3199  exit_program(1);
3200  }
3201 
3202  p = kf;
3203  for (i = 0; i < n; i++) {
3204  char *next = strchr(p, ',');
3205 
3206  if (next)
3207  *next++ = 0;
3208 
3209  if (!memcmp(p, "chapters", 8)) {
3210 
3211  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3212  int j;
3213 
/* grow pts[] to fit one entry per chapter (replacing this list entry) */
3214  if (avf->nb_chapters > INT_MAX - size ||
3215  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3216  sizeof(*pts)))) {
/* NOTE(review): line 3217 is missing — presumably the
 * `av_log(NULL, AV_LOG_FATAL,` head of this message. */
3218  "Could not allocate forced key frames array.\n");
3219  exit_program(1);
3220  }
/* optional "+offset" after the "chapters" keyword */
3221  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3222  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3223 
3224  for (j = 0; j < avf->nb_chapters; j++) {
3225  AVChapter *c = avf->chapters[j];
3226  av_assert1(index < size);
3227  pts[index++] = av_rescale_q(c->start, c->time_base,
3228  avctx->time_base) + t;
3229  }
3230 
3231  } else {
3232 
3233  t = parse_time_or_die("force_key_frames", p, 1);
3234  av_assert1(index < size);
3235  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3236 
3237  }
3238 
3239  p = next;
3240  }
3241 
3242  av_assert0(index == size);
3243  qsort(pts, size, sizeof(*pts), compare_int64);
3244  ost->forced_kf_count = size;
3245  ost->forced_kf_pts = pts;
3246 }
3247 
3248 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3249 {
3250  InputStream *ist = get_input_stream(ost);
3251  AVCodecContext *enc_ctx = ost->enc_ctx;
3252  AVFormatContext *oc;
3253 
3254  if (ost->enc_timebase.num > 0) {
3255  enc_ctx->time_base = ost->enc_timebase;
3256  return;
3257  }
3258 
3259  if (ost->enc_timebase.num < 0) {
3260  if (ist) {
3261  enc_ctx->time_base = ist->st->time_base;
3262  return;
3263  }
3264 
3265  oc = output_files[ost->file_index]->ctx;
3266  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3267  }
3268 
3269  enc_ctx->time_base = default_time_base;
3270 }
3271 
/* Configure the encoder context of an output stream before opening it:
 * encoder-id metadata, disposition, frame rate selection, per-media-type
 * dimensions/formats/time base, forced-keyframe setup.
 * NOTE(review): the signature line (3272) was elided by the doc extraction —
 * presumably "static int init_output_stream_encode(OutputStream *ost)".
 * Several other lines are also missing (gaps in the embedded numbering, e.g.
 * 3276 "AVCodecContext *dec_ctx = NULL;", 3301-02, 3307, 3314, 3334-41, 3347,
 * 3350, 3352, 3362, 3377, 3388-98, 3403); confirm against the full source. */
3273 {
3274  InputStream *ist = get_input_stream(ost);
3275  AVCodecContext *enc_ctx = ost->enc_ctx;
3277  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3278  int j, ret;
3279 
3280  set_encoder_id(output_files[ost->file_index], ost);
3281 
3282  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3283  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3284  // which have to be filtered out to prevent leaking them to output files.
3285  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3286 
3287  if (ist) {
3288  ost->st->disposition = ist->st->disposition;
3289 
3290  dec_ctx = ist->dec_ctx;
3291 
3292  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3293  } else {
 /* No input stream: if this is the only stream of its type in the
  * output, mark it "default" (the assignment line was elided). */
3294  for (j = 0; j < oc->nb_streams; j++) {
3295  AVStream *st = oc->streams[j];
3296  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3297  break;
3298  }
3299  if (j == oc->nb_streams)
3300  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3303  }
3304 
 /* Frame-rate selection for video: -r override, filter output, input
  * -framerate, input r_frame_rate, finally a 25 fps fallback. */
3305  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3306  if (!ost->frame_rate.num)
3308  if (ist && !ost->frame_rate.num)
3309  ost->frame_rate = ist->framerate;
3310  if (ist && !ost->frame_rate.num)
3311  ost->frame_rate = ist->st->r_frame_rate;
3312  if (ist && !ost->frame_rate.num) {
3313  ost->frame_rate = (AVRational){25, 1};
 /* NOTE(review): av_log head (line 3314) elided here. */
3315  "No information "
3316  "about the input framerate is available. Falling "
3317  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3318  "if you want a different framerate.\n",
3319  ost->file_index, ost->index);
3320  }
3321 
 /* Snap to the nearest rate the encoder supports unless -force_fps. */
3322  if (ost->enc->supported_framerates && !ost->force_fps) {
3323  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3324  ost->frame_rate = ost->enc->supported_framerates[idx];
3325  }
3326  // reduce frame rate for mpeg4 to be within the spec limits
3327  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3328  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3329  ost->frame_rate.num, ost->frame_rate.den, 65535);
3330  }
3331  }
3332 
3333  switch (enc_ctx->codec_type) {
3334  case AVMEDIA_TYPE_AUDIO:
 /* NOTE(review): sample_fmt/sample_rate/channel assignments from the
  * buffersink (lines 3335, 3339-41) were elided here. */
3336  if (dec_ctx)
3337  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3338  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3342 
3343  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3344  break;
3345 
3346  case AVMEDIA_TYPE_VIDEO:
3348 
3349  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
 /* NOTE(review): the time-base-from-buffersink line (3350) and the
  * second half of this condition (3352) were elided. */
3351  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3353  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3354  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3355  }
3356 
3357  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3358  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3359  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3360  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3361  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3363 
3364  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3365  if (dec_ctx)
3366  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3367  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3368 
3369  enc_ctx->framerate = ost->frame_rate;
3370 
3371  ost->st->avg_frame_rate = ost->frame_rate;
3372 
 /* Any geometry/pixfmt change relative to the decoder implies real
  * transcoding (the flag assignment on line 3377 was elided). */
3373  if (!dec_ctx ||
3374  enc_ctx->width != dec_ctx->width ||
3375  enc_ctx->height != dec_ctx->height ||
3376  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3378  }
3379 
 /* -top 0/1 forces bottom/top field first; -1 leaves it untouched. */
3380  if (ost->top_field_first == 0) {
3381  enc_ctx->field_order = AV_FIELD_BB;
3382  } else if (ost->top_field_first == 1) {
3383  enc_ctx->field_order = AV_FIELD_TT;
3384  }
3385 
3386  if (ost->forced_keyframes) {
3387  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
 /* NOTE(review): av_expr_parse of the "expr:" body (lines
  * 3388-89) was elided here. */
3390  if (ret < 0) {
3392  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3393  return ret;
3394  }
3399 
3400  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3401  // parse it only for static kf timings
3402  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
 /* NOTE(review): the parse_forced_key_frames() call (3403) was elided. */
3404  }
3405  }
3406  break;
3407  case AVMEDIA_TYPE_SUBTITLE:
3408  enc_ctx->time_base = AV_TIME_BASE_Q;
3409  if (!enc_ctx->width) {
3410  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3411  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3412  }
3413  break;
3414  case AVMEDIA_TYPE_DATA:
3415  break;
3416  default:
3417  abort();
3418  break;
3419  }
3420 
3421  ost->mux_timebase = enc_ctx->time_base;
3422 
3423  return 0;
3424 }
3425 
/* Fully initialize one output stream: for encoded streams set up and open the
 * encoder (options, hw device, subtitle checks, side data copies); for
 * streamcopy delegate to init_output_stream_streamcopy(); then apply user
 * -disposition flags, init bitstream filters, and try to write the output
 * file header. Returns 0 or a negative AVERROR; `error`/`error_len` receive a
 * human-readable message on failure.
 * NOTE(review): several lines were elided by the doc extraction (gaps at
 * 3443, 3447, 3468, 3470, 3494, 3496, 3502, 3504); flagged inline below. */
3426 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3427 {
3428  int ret = 0;
3429 
3430  if (ost->encoding_needed) {
3431  AVCodec *codec = ost->enc;
3432  AVCodecContext *dec = NULL;
3433  InputStream *ist;
3434 
3435  ret = init_output_stream_encode(ost);
3436  if (ret < 0)
3437  return ret;
3438 
3439  if ((ist = get_input_stream(ost)))
3440  dec = ist->dec_ctx;
3441  if (dec && dec->subtitle_header) {
3442  /* ASS code assumes this buffer is null terminated so add extra byte. */
 /* NOTE(review): the av_mallocz(subtitle_header_size + 1) line (3443)
  * and the subtitle_header_size assignment (3447) were elided. */
3444  if (!ost->enc_ctx->subtitle_header)
3445  return AVERROR(ENOMEM);
3446  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3448  }
 /* Default to automatic threading unless the user chose otherwise. */
3449  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3450  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* Audio encoders without their own default get 128 kb/s. */
3451  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3452  !codec->defaults &&
3453  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3454  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3455  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3456 
3457  ret = hw_device_setup_for_encode(ost);
3458  if (ret < 0) {
3459  snprintf(error, error_len, "Device setup failed for "
3460  "encoder on output stream #%d:%d : %s",
3461  ost->file_index, ost->index, av_err2str(ret));
3462  return ret;
3463  }
3464 
 /* Subtitle transcoding is only supported text->text or bitmap->bitmap. */
3465  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3466  int input_props = 0, output_props = 0;
 /* NOTE(review): the avcodec_descriptor_get(...) initializer lines
  * (3468 and 3470) were elided by the extraction. */
3467  AVCodecDescriptor const *input_descriptor =
3469  AVCodecDescriptor const *output_descriptor =
3471  if (input_descriptor)
3472  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3473  if (output_descriptor)
3474  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3475  if (input_props && output_props && input_props != output_props) {
3476  snprintf(error, error_len,
3477  "Subtitle encoding currently only possible from text to text "
3478  "or bitmap to bitmap");
3479  return AVERROR_INVALIDDATA;
3480  }
3481  }
3482 
3483  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3484  if (ret == AVERROR_EXPERIMENTAL)
3485  abort_codec_experimental(codec, 1);
3486  snprintf(error, error_len,
3487  "Error while opening encoder for output stream #%d:%d - "
3488  "maybe incorrect parameters such as bit_rate, rate, width or height",
3489  ost->file_index, ost->index);
3490  return ret;
3491  }
 /* Fixed-frame-size audio encoders: propagate frame size to the
  * buffersink (the av_buffersink_set_frame_size call on 3494 was elided). */
3492  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3493  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3495  ost->enc_ctx->frame_size);
3497  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3498  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3499  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3500  " It takes bits/s as argument, not kbits/s\n");
3501 
 /* NOTE(review): lines 3502 (presumably avcodec_parameters_from_context)
  * and 3504 (av_log head) were elided by the extraction. */
3503  if (ret < 0) {
3505  "Error initializing the output stream codec context.\n");
3506  exit_program(1);
3507  }
3508  /*
3509  * FIXME: ost->st->codec should't be needed here anymore.
3510  */
3511  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3512  if (ret < 0)
3513  return ret;
3514 
 /* Copy encoder-produced global side data to the stream. */
3515  if (ost->enc_ctx->nb_coded_side_data) {
3516  int i;
3517 
3518  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3519  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3520  uint8_t *dst_data;
3521 
3522  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3523  if (!dst_data)
3524  return AVERROR(ENOMEM);
3525  memcpy(dst_data, sd_src->data, sd_src->size);
3526  }
3527  }
3528 
3529  /*
3530  * Add global input side data. For now this is naive, and copies it
3531  * from the input stream's global side data. All side data should
3532  * really be funneled over AVFrame and libavfilter, then added back to
3533  * packet side data, and then potentially using the first packet for
3534  * global side data.
3535  */
3536  if (ist) {
3537  int i;
3538  for (i = 0; i < ist->st->nb_side_data; i++) {
3539  AVPacketSideData *sd = &ist->st->side_data[i];
3540  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3541  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3542  if (!dst)
3543  return AVERROR(ENOMEM);
3544  memcpy(dst, sd->data, sd->size);
 /* Autorotation is applied by filters, so neutralize the matrix. */
3545  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3546  av_display_rotation_set((uint32_t *)dst, 0);
3547  }
3548  }
3549  }
3550 
3551  // copy timebase while removing common factors
3552  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3553  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3554 
3555  // copy estimated duration as a hint to the muxer
3556  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3557  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3558 
3559  ost->st->codec->codec= ost->enc_ctx->codec;
3560  } else if (ost->stream_copy) {
3561  ret = init_output_stream_streamcopy(ost);
3562  if (ret < 0)
3563  return ret;
3564  }
3565 
3566  // parse user provided disposition, and update stream values
3567  if (ost->disposition) {
3568  static const AVOption opts[] = {
3569  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3570  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3571  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3572  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3573  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3574  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3575  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3576  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3577  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3578  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3579  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3580  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3581  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3582  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3583  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3584  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3585  { NULL },
3586  };
 /* Throwaway AVClass so av_opt_eval_flags can parse the flag string. */
3587  static const AVClass class = {
3588  .class_name = "",
3589  .item_name = av_default_item_name,
3590  .option = opts,
3591  .version = LIBAVUTIL_VERSION_INT,
3592  };
3593  const AVClass *pclass = &class;
3594 
3595  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3596  if (ret < 0)
3597  return ret;
3598  }
3599 
3600  /* initialize bitstream filters for the output stream
3601  * needs to be done here, because the codec id for streamcopy is not
3602  * known until now */
3603  ret = init_output_bsfs(ost);
3604  if (ret < 0)
3605  return ret;
3606 
3607  ost->initialized = 1;
3608 
 /* Write the header once all of this file's streams are initialized. */
3609  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3610  if (ret < 0)
3611  return ret;
3612 
3613  return ret;
3614 }
3615 
/* Warn once per stream index when a packet arrives for a stream that appeared
 * after avformat_find_stream_info() (mid-stream, e.g. in MPEG-TS); such
 * streams are not mapped and will be ignored. */
3616 static void report_new_stream(int input_index, AVPacket *pkt)
3617 {
3618  InputFile *file = input_files[input_index];
3619  AVStream *st = file->ctx->streams[pkt->stream_index];
3620 
 /* Already warned about this (or a later) stream index. */
3621  if (pkt->stream_index < file->nb_streams_warn)
3622  return;
3623  av_log(file->ctx, AV_LOG_WARNING,
3624  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
 /* NOTE(review): the %s media-type argument (line 3625, presumably
  * av_get_media_type_string(st->codecpar->codec_type)) was elided. */
3626  input_index, pkt->stream_index,
3627  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3628  file->nb_streams_warn = pkt->stream_index + 1;
3629 }
3630 
/* One-time setup before the main transcode loop: resolve filter-fed output
 * streams' source indices, init framerate emulation, open all decoders and
 * (non-filter-fed) encoders, discard unused programs, write headers for
 * stream-less outputs, and finally dump the stream mapping to the log.
 * Returns 0 on success or a negative AVERROR. */
3631 static int transcode_init(void)
3632 {
3633  int ret = 0, i, j, k;
3634  AVFormatContext *oc;
3635  OutputStream *ost;
3636  InputStream *ist;
3637  char error[1024] = {0};
3638 
 /* For outputs fed by a single-input filtergraph, record which input
  * stream ultimately feeds them (used for mapping/reporting). */
3639  for (i = 0; i < nb_filtergraphs; i++) {
3640  FilterGraph *fg = filtergraphs[i];
3641  for (j = 0; j < fg->nb_outputs; j++) {
3642  OutputFilter *ofilter = fg->outputs[j];
3643  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3644  continue;
3645  if (fg->nb_inputs != 1)
3646  continue;
3647  for (k = nb_input_streams-1; k >= 0 ; k--)
3648  if (fg->inputs[0]->ist == input_streams[k])
3649  break;
3650  ofilter->ost->source_index = k;
3651  }
3652  }
3653 
3654  /* init framerate emulation */
3655  for (i = 0; i < nb_input_files; i++) {
3656  InputFile *ifile = input_files[i];
3657  if (ifile->rate_emu)
3658  for (j = 0; j < ifile->nb_streams; j++)
3659  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3660  }
3661 
3662  /* init input streams */
3663  for (i = 0; i < nb_input_streams; i++)
3664  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3665  for (i = 0; i < nb_output_streams; i++) {
3666  ost = output_streams[i];
3667  avcodec_close(ost->enc_ctx);
3668  }
3669  goto dump_format;
3670  }
3671 
3672  /* open each encoder */
3673  for (i = 0; i < nb_output_streams; i++) {
3674  // skip streams fed from filtergraphs until we have a frame for them
3675  if (output_streams[i]->filter)
3676  continue;
3677 
3678  ret = init_output_stream_wrapper(output_streams[i], 0);
3679  if (ret < 0)
3680  goto dump_format;
3681  }
3682 
3683  /* discard unused programs */
3684  for (i = 0; i < nb_input_files; i++) {
3685  InputFile *ifile = input_files[i];
3686  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3687  AVProgram *p = ifile->ctx->programs[j];
3688  int discard = AVDISCARD_ALL;
3689 
 /* Keep the program if any of its streams is actually used. */
3690  for (k = 0; k < p->nb_stream_indexes; k++)
3691  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3692  discard = AVDISCARD_DEFAULT;
3693  break;
3694  }
3695  p->discard = discard;
3696  }
3697  }
3698 
3699  /* write headers for files with no streams */
3700  for (i = 0; i < nb_output_files; i++) {
3701  oc = output_files[i]->ctx;
3702  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3703  ret = check_init_output_file(output_files[i], i);
3704  if (ret < 0)
3705  goto dump_format;
3706  }
3707  }
3708 
3709  dump_format:
3710  /* dump the stream mapping */
3711  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3712  for (i = 0; i < nb_input_streams; i++) {
3713  ist = input_streams[i];
3714 
3715  for (j = 0; j < ist->nb_filters; j++) {
3716  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3717  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3718  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3719  ist->filters[j]->name);
3720  if (nb_filtergraphs > 1)
3721  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3722  av_log(NULL, AV_LOG_INFO, "\n");
3723  }
3724  }
3725  }
3726 
3727  for (i = 0; i < nb_output_streams; i++) {
3728  ost = output_streams[i];
3729 
3730  if (ost->attachment_filename) {
3731  /* an attached file */
3732  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3733  ost->attachment_filename, ost->file_index, ost->index);
3734  continue;
3735  }
3736 
3737  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3738  /* output from a complex graph */
3739  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3740  if (nb_filtergraphs > 1)
3741  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3742 
3743  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3744  ost->index, ost->enc ? ost->enc->name : "?");
3745  continue;
3746  }
3747 
3748  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3749  input_streams[ost->source_index]->file_index,
3750  input_streams[ost->source_index]->st->index,
3751  ost->file_index,
3752  ost->index);
3753  if (ost->sync_ist != input_streams[ost->source_index])
3754  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3755  ost->sync_ist->file_index,
3756  ost->sync_ist->st->index);
3757  if (ost->stream_copy)
3758  av_log(NULL, AV_LOG_INFO, " (copy)");
3759  else {
 /* Print "codec (decoder) -> codec (encoder)"; "native" marks the
  * default implementation whose name equals the codec name. */
3760  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3761  const AVCodec *out_codec = ost->enc;
3762  const char *decoder_name = "?";
3763  const char *in_codec_name = "?";
3764  const char *encoder_name = "?";
3765  const char *out_codec_name = "?";
3766  const AVCodecDescriptor *desc;
3767 
3768  if (in_codec) {
3769  decoder_name = in_codec->name;
3770  desc = avcodec_descriptor_get(in_codec->id);
3771  if (desc)
3772  in_codec_name = desc->name;
3773  if (!strcmp(decoder_name, in_codec_name))
3774  decoder_name = "native";
3775  }
3776 
3777  if (out_codec) {
3778  encoder_name = out_codec->name;
3779  desc = avcodec_descriptor_get(out_codec->id);
3780  if (desc)
3781  out_codec_name = desc->name;
3782  if (!strcmp(encoder_name, out_codec_name))
3783  encoder_name = "native";
3784  }
3785 
3786  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3787  in_codec_name, decoder_name,
3788  out_codec_name, encoder_name);
3789  }
3790  av_log(NULL, AV_LOG_INFO, "\n");
3791  }
3792 
3793  if (ret) {
3794  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3795  return ret;
3796  }
3797 
 /* NOTE(review): line 3798 was elided by the doc extraction — presumably
  * atomic_store(&transcode_init_done, 1); confirm upstream. */
3799 
3800  return 0;
3801 }
3802 
3803 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3804 static int need_output(void)
3805 {
3806  int i;
3807 
3808  for (i = 0; i < nb_output_streams; i++) {
3809  OutputStream *ost = output_streams[i];
3810  OutputFile *of = output_files[ost->file_index];
3811  AVFormatContext *os = output_files[ost->file_index]->ctx;
3812 
3813  if (ost->finished ||
3814  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3815  continue;
3816  if (ost->frame_number >= ost->max_frames) {
3817  int j;
3818  for (j = 0; j < of->ctx->nb_streams; j++)
3819  close_output_stream(output_streams[of->ost_index + j]);
3820  continue;
3821  }
3822 
3823  return 1;
3824  }
3825 
3826  return 0;
3827 }
3828 
3829 /**
3830  * Select the output stream to process.
3831  *
3832  * @return selected output stream, or NULL if none available
3833  */
 /* NOTE(review): the signature line (3834) was elided by the doc extraction —
  * presumably "static OutputStream *choose_output(void)"; confirm upstream.
  * Picks the not-yet-initialized stream first, otherwise the non-finished
  * stream with the smallest muxed DTS (keeps outputs interleaved). */
3835 {
3836  int i;
3837  int64_t opts_min = INT64_MAX;
3838  OutputStream *ost_min = NULL;
3839 
3840  for (i = 0; i < nb_output_streams; i++) {
3841  OutputStream *ost = output_streams[i];
 /* Streams with no DTS yet sort first (INT64_MIN). */
3842  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3843  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3844  AV_TIME_BASE_Q);
3845  if (ost->st->cur_dts == AV_NOPTS_VALUE)
 /* NOTE(review): the av_log(NULL, AV_LOG_DEBUG, head of this call
  * (line 3846) was elided by the extraction. */
3847  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3848  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3849 
 /* Uninitialized streams take priority so headers can be written. */
3850  if (!ost->initialized && !ost->inputs_done)
3851  return ost;
3852 
3853  if (!ost->finished && opts < opts_min) {
3854  opts_min = opts;
 /* An unavailable (EAGAIN) best candidate yields NULL to the caller. */
3855  ost_min = ost->unavailable ? NULL : ost;
3856  }
3857  }
3858  return ost_min;
3859 }
3860 
/* Enable (on != 0) or disable terminal echo on stdin.
 * Compiles to a no-op on systems without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attrs;

    if (tcgetattr(0, &attrs) != 0)
        return;                    /* stdin is not a tty: nothing to do */
    if (on)
        attrs.c_lflag |= ECHO;
    else
        attrs.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attrs);
#endif
}
3872 
/* Poll for interactive keyboard commands during transcoding (at most once per
 * 100 ms, never in daemon mode). Handles quit, verbosity, QP histogram,
 * packet-dump cycling, filter commands ('c'/'C'), debug modes ('d'/'D') and
 * help ('?'). Returns AVERROR_EXIT when the user asked to stop, else 0. */
3873 static int check_keyboard_interaction(int64_t cur_time)
3874 {
3875  int i, ret, key;
3876  static int64_t last_time;
3877  if (received_nb_signals)
3878  return AVERROR_EXIT;
3879  /* read_key() returns 0 on EOF */
3880  if(cur_time - last_time >= 100000 && !run_as_daemon){
3881  key = read_key();
3882  last_time = cur_time;
3883  }else
3884  key = -1;
3885  if (key == 'q')
3886  return AVERROR_EXIT;
3887  if (key == '+') av_log_set_level(av_log_get_level()+10);
3888  if (key == '-') av_log_set_level(av_log_get_level()-10);
3889  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off. */
3890  if (key == 'h'){
3891  if (do_hex_dump){
3892  do_hex_dump = do_pkt_dump = 0;
3893  } else if(do_pkt_dump){
3894  do_hex_dump = 1;
3895  } else
3896  do_pkt_dump = 1;
 /* NOTE(review): line 3897 (presumably av_log_set_level(AV_LOG_DEBUG);)
  * was elided by the doc extraction. */
3898  }
 /* 'c'/'C': read "<target> <time> <command>[ <arg>]" and send/queue it to
  * the filtergraphs ('c' = first matching filter only). */
3899  if (key == 'c' || key == 'C'){
3900  char buf[4096], target[64], command[256], arg[256] = {0};
3901  double time;
3902  int k, n = 0;
3903  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3904  i = 0;
3905  set_tty_echo(1);
3906  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3907  if (k > 0)
3908  buf[i++] = k;
3909  buf[i] = 0;
3910  set_tty_echo(0);
3911  fprintf(stderr, "\n");
3912  if (k > 0 &&
3913  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3914  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3915  target, time, command, arg);
3916  for (i = 0; i < nb_filtergraphs; i++) {
3917  FilterGraph *fg = filtergraphs[i];
3918  if (fg->graph) {
3919  if (time < 0) {
3920  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3921  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3922  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3923  } else if (key == 'c') {
3924  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3925  ret = AVERROR_PATCHWELCOME;
3926  } else {
3927  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3928  if (ret < 0)
3929  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3930  }
3931  }
3932  }
3933  } else {
 /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, head of this call
  * (line 3934) was elided by the extraction. */
3935  "Parse error, at least 3 arguments were expected, "
3936  "only %d given in string '%s'\n", n, buf);
3937  }
3938  }
 /* 'D' cycles through codec debug flag bits; 'd' prompts for a value. */
3939  if (key == 'd' || key == 'D'){
3940  int debug=0;
3941  if(key == 'D') {
3942  debug = input_streams[0]->st->codec->debug<<1;
3943  if(!debug) debug = 1;
3944  while(debug & (FF_DEBUG_DCT_COEFF
3945 #if FF_API_DEBUG_MV
3946  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3947 #endif
3948  )) //unsupported, would just crash
3949  debug += debug;
3950  }else{
3951  char buf[32];
3952  int k = 0;
3953  i = 0;
3954  set_tty_echo(1);
3955  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3956  if (k > 0)
3957  buf[i++] = k;
3958  buf[i] = 0;
3959  set_tty_echo(0);
3960  fprintf(stderr, "\n");
3961  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3962  fprintf(stderr,"error parsing debug value\n");
3963  }
 /* Apply the chosen debug flags to every decoder and encoder. */
3964  for(i=0;i<nb_input_streams;i++) {
3965  input_streams[i]->st->codec->debug = debug;
3966  }
3967  for(i=0;i<nb_output_streams;i++) {
3968  OutputStream *ost = output_streams[i];
3969  ost->enc_ctx->debug = debug;
3970  }
3971  if(debug) av_log_set_level(AV_LOG_DEBUG);
3972  fprintf(stderr,"debug=%d\n", debug);
3973  }
3974  if (key == '?'){
3975  fprintf(stderr, "key function\n"
3976  "? show this help\n"
3977  "+ increase verbosity\n"
3978  "- decrease verbosity\n"
3979  "c Send command to first matching filter supporting it\n"
3980  "C Send/Queue command to all matching filters\n"
3981  "D cycle through available debug modes\n"
3982  "h dump packets/hex press to cycle through the 3 states\n"
3983  "q quit\n"
3984  "s Show QP histogram\n"
3985  );
3986  }
3987  return 0;
3988 }
3989 
3990 #if HAVE_THREADS
/* Per-input-file demuxer thread: reads packets from the InputFile passed as
 * arg and pushes them onto its thread message queue until EOF/error. On EOF
 * or a fatal error the error is propagated to the receiving side via
 * av_thread_message_queue_set_err_recv(). Always returns NULL. */
3991 static void *input_thread(void *arg)
3992 {
3993  InputFile *f = arg;
 /* Live (non-seekable) inputs start in non-blocking mode so the demuxer
  * is never stalled by a full queue. */
3994  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3995  int ret = 0;
3996 
3997  while (1) {
3998  AVPacket pkt;
3999  ret = av_read_frame(f->ctx, &pkt);
4000 
4001  if (ret == AVERROR(EAGAIN)) {
4002  av_usleep(10000);
4003  continue;
4004  }
4005  if (ret < 0) {
4006  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4007  break;
4008  }
4009  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* Queue full in non-blocking mode: warn once, then fall back to a
  * blocking send for this and all subsequent packets. */
4010  if (flags && ret == AVERROR(EAGAIN)) {
4011  flags = 0;
4012  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* NOTE(review): the av_log(f->ctx, AV_LOG_WARNING, head of this call
  * (line 4013) was elided by the doc extraction. */
4014  "Thread message queue blocking; consider raising the "
4015  "thread_queue_size option (current value: %d)\n",
4016  f->thread_queue_size);
4017  }
4018  if (ret < 0) {
4019  if (ret != AVERROR_EOF)
4020  av_log(f->ctx, AV_LOG_ERROR,
4021  "Unable to send packet to main thread: %s\n",
4022  av_err2str(ret));
4023  av_packet_unref(&pkt);
4024  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4025  break;
4026  }
4027  }
4028 
4029  return NULL;
4030 }
4031 
/* Stop and join the demuxer thread of input file i: drain any queued packets,
 * join the thread, and free its message queue. No-op when threading was not
 * enabled for that file. */
4032 static void free_input_thread(int i)
4033 {
4034  InputFile *f = input_files[i];
4035  AVPacket pkt;
4036 
4037  if (!f || !f->in_thread_queue)
4038  return;
 /* NOTE(review): line 4039 was elided by the doc extraction — presumably
  * av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
  * which unblocks the sender; confirm upstream. */
 /* Drain remaining packets so the thread can finish its pending send. */
4040  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4041  av_packet_unref(&pkt);
4042 
4043  pthread_join(f->thread, NULL);
4044  f->joined = 1;
4045  av_thread_message_queue_free(&f->in_thread_queue);
4046 }
4047 
4048 static void free_input_threads(void)
4049 {
4050  int i;
4051 
4052  for (i = 0; i < nb_input_files; i++)
4053  free_input_thread(i);
4054 }
4055 
4056 static int init_input_thread(int i)
4057 {
4058  int ret;
4059  InputFile *f = input_files[i];
4060 
4061  if (f->thread_queue_size < 0)
4062  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4063  if (!f->thread_queue_size)
4064  return 0;
4065 
4066  if (f->ctx->pb ? !f->ctx->pb->seekable :
4067  strcmp(f->ctx->iformat->name, "lavfi"))
4068  f->non_blocking = 1;
4069  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4070  f->thread_queue_size, sizeof(AVPacket));
4071  if (ret < 0)
4072  return ret;
4073 
4074  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4075  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4076  av_thread_message_queue_free(&f->in_thread_queue);
4077  return AVERROR(ret);
4078  }
4079 
4080  return 0;
4081 }
4082 
4083 static int init_input_threads(void)
4084 {
4085  int i, ret;
4086 
4087  for (i = 0; i < nb_input_files; i++) {
4088  ret = init_input_thread(i);
4089  if (ret < 0)
4090  return ret;
4091  }
4092  return 0;
4093 }
4094 
/* Fetch the next packet from the input file's demuxer-thread queue;
 * non-blocking for live inputs so rate control keeps working. */
4095 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4096 {
4097  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4098  f->non_blocking ?
 /* NOTE(review): line 4099 was elided by the doc extraction — presumably
  * "AV_THREAD_MESSAGE_NONBLOCK : 0);"; confirm upstream. */
4100 }
4101 #endif
4102 
/* Read the next packet of an input file, honoring -re rate emulation (returns
 * EAGAIN while any of the file's streams is ahead of wall-clock time) and the
 * per-file demuxer thread when one is running.
 * NOTE(review): the signature line (4103) was elided by the doc extraction —
 * presumably "static int get_input_packet(InputFile *f, AVPacket *pkt)". */
4104 {
4105  if (f->rate_emu) {
4106  int i;
4107  for (i = 0; i < f->nb_streams; i++) {
4108  InputStream *ist = input_streams[f->ist_index + i];
 /* Compare the stream's dts (already in AV_TIME_BASE units) against
  * elapsed real time since the stream started. */
4109  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4110  int64_t now = av_gettime_relative() - ist->start;
4111  if (pts > now)
4112  return AVERROR(EAGAIN);
4113  }
4114  }
4115 
4116 #if HAVE_THREADS
4117  if (f->thread_queue_size)
4118  return get_input_packet_mt(f, pkt);
4119 #endif
4120  return av_read_frame(f->ctx, pkt);
4121 }
4122 
4123 static int got_eagain(void)
4124 {
4125  int i;
4126  for (i = 0; i < nb_output_streams; i++)
4127  if (output_streams[i]->unavailable)
4128  return 1;
4129  return 0;
4130 }
4131 
4132 static void reset_eagain(void)
4133 {
4134  int i;
4135  for (i = 0; i < nb_input_files; i++)
4136  input_files[i]->eagain = 0;
4137  for (i = 0; i < nb_output_streams; i++)
4138  output_streams[i]->unavailable = 0;
4139 }
4140 
4141 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4142 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4143  AVRational time_base)
4144 {
4145  int ret;
4146 
4147  if (!*duration) {
4148  *duration = tmp;
4149  return tmp_time_base;
4150  }
4151 
4152  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4153  if (ret < 0) {
4154  *duration = tmp;
4155  return tmp_time_base;
4156  }
4157 
4158  return time_base;
4159 }
4160 
/* Rewind an input file for -stream_loop: seek back to its start time,
 * estimate how long the just-finished pass lasted (so timestamps of the next
 * pass can be offset), store that in ifile->duration/time_base, and decrement
 * the remaining loop count.
 * NOTE(review): the signature line (4161) was elided by the doc extraction —
 * presumably "static int seek_to_start(InputFile *ifile, AVFormatContext *is)". */
4162 {
4163  InputStream *ist;
4164  AVCodecContext *avctx;
4165  int i, ret, has_audio = 0;
4166  int64_t duration = 0;
4167 
4168  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4169  if (ret < 0)
4170  return ret;
4171 
 /* First pass: detect whether any audio stream produced samples. */
4172  for (i = 0; i < ifile->nb_streams; i++) {
4173  ist = input_streams[ifile->ist_index + i];
4174  avctx = ist->dec_ctx;
4175 
4176  /* duration is the length of the last frame in a stream
4177  * when audio stream is present we don't care about
4178  * last video frame length because it's not defined exactly */
4179  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4180  has_audio = 1;
4181  }
4182 
 /* Second pass: per stream, estimate the last frame's duration and add it
  * to the observed pts span; keep the maximum across streams. */
4183  for (i = 0; i < ifile->nb_streams; i++) {
4184  ist = input_streams[ifile->ist_index + i];
4185  avctx = ist->dec_ctx;
4186 
4187  if (has_audio) {
 /* Audio length is exact: nb_samples / sample_rate. */
4188  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4189  AVRational sample_rate = {1, avctx->sample_rate};
4190 
4191  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4192  } else {
4193  continue;
4194  }
4195  } else {
 /* Video: one frame interval from the best known frame rate, or a
  * single timebase tick as a last resort. */
4196  if (ist->framerate.num) {
4197  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4198  } else if (ist->st->avg_frame_rate.num) {
4199  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4200  } else {
4201  duration = 1;
4202  }
4203  }
4204  if (!ifile->duration)
4205  ifile->time_base = ist->st->time_base;
4206  /* the total duration of the stream, max_pts - min_pts is
4207  * the duration of the stream without the last frame */
4208  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4209  duration += ist->max_pts - ist->min_pts;
4210  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4211  ifile->time_base);
4212  }
4213 
 /* A positive loop count is consumed; negative means loop forever. */
4214  if (ifile->loop > 0)
4215  ifile->loop--;
4216 
4217  return ret;
4218 }
4219 
4220 /*
4221  * Return
4222  * - 0 -- one packet was read and processed
4223  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4224  * this function should be called again
4225  * - AVERROR_EOF -- this function should not be called again
4226  */
/* Read and process one packet from the given input file.
 * See the contract comment above: returns 0 when a packet was handled,
 * AVERROR(EAGAIN) when the caller should retry, AVERROR_EOF when done.
 * NOTE(review): several original lines were dropped by the doc extraction
 * (marked inline below); the surrounding code is reproduced verbatim. */
4227 static int process_input(int file_index)
4228 {
4229  InputFile *ifile = input_files[file_index];
/* NOTE(review): 'AVFormatContext *is;' (original line 4230) was lost in
 * extraction — 'is' below is that local demuxer context. */
4231  InputStream *ist;
4232  AVPacket pkt;
4233  int ret, thread_ret, i, j;
4234  int64_t duration;
4235  int64_t pkt_dts;
/* With -copyts, discontinuity correction is disabled by default; it may be
 * selectively re-enabled further down for wrapped DTS. */
4236  int disable_discontinuity_correction = copy_ts;
4237 
4238  is = ifile->ctx;
4239  ret = get_input_packet(ifile, &pkt);
4240 
/* No packet available right now (e.g. -re pacing or the input thread's
 * queue is empty): remember the EAGAIN state and let the caller retry. */
4241  if (ret == AVERROR(EAGAIN)) {
4242  ifile->eagain = 1;
4243  return ret;
4244  }
/* EOF/error while -stream_loop is still active: flush the decoders, rewind
 * the file with seek_to_start() and try reading again. */
4245  if (ret < 0 && ifile->loop) {
4246  AVCodecContext *avctx;
4247  for (i = 0; i < ifile->nb_streams; i++) {
4248  ist = input_streams[ifile->ist_index + i];
4249  avctx = ist->dec_ctx;
4250  if (ist->decoding_needed) {
/* NULL packet with no_eof=1: drain the decoder without marking EOF. A
 * positive return means a frame came out — report progress to the caller. */
4251  ret = process_input_packet(ist, NULL, 1);
4252  if (ret>0)
4253  return 0;
4254  avcodec_flush_buffers(avctx);
4255  }
4256  }
/* The input reader thread must be stopped across the seek and restarted
 * afterwards. */
4257 #if HAVE_THREADS
4258  free_input_thread(file_index);
4259 #endif
4260  ret = seek_to_start(ifile, is);
4261 #if HAVE_THREADS
4262  thread_ret = init_input_thread(file_index);
4263  if (thread_ret < 0)
4264  return thread_ret;
4265 #endif
/* A failed rewind is non-fatal: fall through to normal EOF handling. */
4266  if (ret < 0)
4267  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4268  else
4269  ret = get_input_packet(ifile, &pkt);
4270  if (ret == AVERROR(EAGAIN)) {
4271  ifile->eagain = 1;
4272  return ret;
4273  }
4274  }
/* Genuine end of input (or read error): flush all decoders and finish the
 * output streams fed directly (without lavfi) by this file. */
4275  if (ret < 0) {
4276  if (ret != AVERROR_EOF) {
4277  print_error(is->url, ret);
4278  if (exit_on_error)
4279  exit_program(1);
4280  }
4281 
4282  for (i = 0; i < ifile->nb_streams; i++) {
4283  ist = input_streams[ifile->ist_index + i];
4284  if (ist->decoding_needed) {
4285  ret = process_input_packet(ist, NULL, 0);
4286  if (ret>0)
4287  return 0;
4288  }
4289 
4290  /* mark all outputs that don't go through lavfi as finished */
4291  for (j = 0; j < nb_output_streams; j++) {
4292  OutputStream *ost = output_streams[j];
4293 
4294  if (ost->source_index == ifile->ist_index + i &&
4295  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4296  finish_output_stream(ost);
4297  }
4298  }
4299 
/* EAGAIN (not EOF) is returned so the main loop keeps servicing the other
 * input files; eof_reached records this file's real state. */
4300  ifile->eof_reached = 1;
4301  return AVERROR(EAGAIN);
4302  }
4303 
4304  reset_eagain();
4305 
4306  if (do_pkt_dump) {
/* NOTE(review): the av_pkt_dump_log2(...) call opening (original line 4307)
 * was lost in extraction; the line below is its trailing argument. */
4308  is->streams[pkt.stream_index]);
4309  }
4310  /* the following test is needed in case new streams appear
4311  dynamically in stream : we ignore them */
4312  if (pkt.stream_index >= ifile->nb_streams) {
4313  report_new_stream(file_index, &pkt);
4314  goto discard_packet;
4315  }
4316 
4317  ist = input_streams[ifile->ist_index + pkt.stream_index];
4318 
/* Per-stream statistics (used for the final report). */
4319  ist->data_size += pkt.size;
4320  ist->nb_packets++;
4321 
4322  if (ist->discard)
4323  goto discard_packet;
4324 
4325  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
/* NOTE(review): the av_log(...) call opening (original line 4326) was lost
 * in extraction; the line below is its format/arguments tail. */
4327  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4328  if (exit_on_error)
4329  exit_program(1);
4330  }
4331 
4332  if (debug_ts) {
4333  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4334  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
/* NOTE(review): argument lines 4335-4337 (ist index/type, next_dts/next_pts
 * formatting) were lost in extraction. */
4338  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4339  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4340  av_ts2str(input_files[ist->file_index]->ts_offset),
4341  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4342  }
4343 
/* Undo timestamp wraparound for streams with fewer than 64 pts_wrap_bits. */
4344  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4345  int64_t stime, stime2;
4346  // Correcting starttime based on the enabled streams
4347  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4348  // so we instead do it here as part of discontinuity handling
4349  if ( ist->next_dts == AV_NOPTS_VALUE
4350  && ifile->ts_offset == -is->start_time
4351  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4352  int64_t new_start_time = INT64_MAX;
4353  for (i=0; i<is->nb_streams; i++) {
4354  AVStream *st = is->streams[i];
4355  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4356  continue;
/* start time becomes the minimum over all non-discarded streams */
4357  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4358  }
4359  if (new_start_time > is->start_time) {
4360  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4361  ifile->ts_offset = -new_start_time;
4362  }
4363  }
4364 
4365  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4366  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4367  ist->wrap_correction_done = 1;
4368 
/* If a timestamp sits more than half a wrap period past the start time it
 * must have wrapped: pull it back by one full period. Correction stays
 * pending (wrap_correction_done = 0) until both dts and pts are clean. */
4369  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4370  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4371  ist->wrap_correction_done = 0;
4372  }
4373  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4374  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4375  ist->wrap_correction_done = 0;
4376  }
4377  }
4378 
4379  /* add the stream-global side data to the first packet */
4380  if (ist->nb_packets == 1) {
4381  for (i = 0; i < ist->st->nb_side_data; i++) {
4382  AVPacketSideData *src_sd = &ist->st->side_data[i];
4383  uint8_t *dst_data;
4384 
/* display matrix is handled separately (rotation option) */
4385  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4386  continue;
4387 
/* never overwrite side data the packet already carries */
4388  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4389  continue;
4390 
4391  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4392  if (!dst_data)
4393  exit_program(1);
4394 
4395  memcpy(dst_data, src_sd->data, src_sd->size);
4396  }
4397  }
4398 
/* Apply the input file's timestamp offset and the per-stream -itsscale. */
4399  if (pkt.dts != AV_NOPTS_VALUE)
4400  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4401  if (pkt.pts != AV_NOPTS_VALUE)
4402  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4403 
4404  if (pkt.pts != AV_NOPTS_VALUE)
4405  pkt.pts *= ist->ts_scale;
4406  if (pkt.dts != AV_NOPTS_VALUE)
4407  pkt.dts *= ist->ts_scale;
4408 
/* Inter-stream discontinuity check for the first packet of a stream in
 * TS-discont formats, against the file-wide last_ts.
 * NOTE(review): the pkt_dts computation (original line 4409) and part of
 * the condition (original line 4411) were lost in extraction. */
4410  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4412  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4413  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4414  int64_t delta = pkt_dts - ifile->last_ts;
4415  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4416  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4417  ifile->ts_offset -= delta;
/* NOTE(review): the av_log(...) call opening (original line 4418) was lost
 * in extraction. */
4419  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4420  delta, ifile->ts_offset);
4421  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4422  if (pkt.pts != AV_NOPTS_VALUE)
4423  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4424  }
4425  }
4426 
/* When looping, shift timestamps past the accumulated duration of previous
 * iterations and track the stream's pts extrema. */
4427  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4428  if (pkt.pts != AV_NOPTS_VALUE) {
4429  pkt.pts += duration;
4430  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4431  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4432  }
4433 
4434  if (pkt.dts != AV_NOPTS_VALUE)
4435  pkt.dts += duration;
4436 
/* NOTE(review): the recomputation of pkt_dts (original line 4437) was lost
 * in extraction. */
4438 
/* With -copyts, a DTS that looks wrapped (close to next_dts after adding
 * one wrap period) re-enables discontinuity correction for this packet. */
4439  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4440  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4441  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4442  ist->st->time_base, AV_TIME_BASE_Q,
/* NOTE(review): the rounding-flags argument line (original 4443) was lost
 * in extraction. */
4444  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4445  disable_discontinuity_correction = 0;
4446  }
4447 
/* Intra-stream discontinuity handling against the decoder's predicted
 * next_dts.
 * NOTE(review): part of the condition (original line 4449) was lost in
 * extraction. */
4448  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4450  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4451  !disable_discontinuity_correction) {
4452  int64_t delta = pkt_dts - ist->next_dts;
4453  if (is->iformat->flags & AVFMT_TS_DISCONT) {
/* TS-discont formats: absorb large jumps into ts_offset */
4454  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4455  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4456  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4457  ifile->ts_offset -= delta;
/* NOTE(review): the av_log(...) call opening (original line 4458) was lost
 * in extraction. */
4459  "timestamp discontinuity for stream #%d:%d "
4460  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4461  ist->file_index, ist->st->index, ist->st->id,
/* NOTE(review): the media-type argument line (original 4462) was lost in
 * extraction. */
4463  delta, ifile->ts_offset);
4464  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4465  if (pkt.pts != AV_NOPTS_VALUE)
4466  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4467  }
4468  } else {
/* non-discont formats: drop timestamps that are out of range */
4469  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4470  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4471  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4472  pkt.dts = AV_NOPTS_VALUE;
4473  }
4474  if (pkt.pts != AV_NOPTS_VALUE){
4475  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4476  delta = pkt_pts - ist->next_dts;
4477  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4478  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4479  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4480  pkt.pts = AV_NOPTS_VALUE;
4481  }
4482  }
4483  }
4484  }
4485 
/* Remember the last seen DTS (in AV_TIME_BASE) for the inter-stream check. */
4486  if (pkt.dts != AV_NOPTS_VALUE)
4487  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4488 
4489  if (debug_ts) {
4490  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
/* NOTE(review): the ist index/type argument line (original 4491) was lost
 * in extraction. */
4492  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4493  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4494  av_ts2str(input_files[ist->file_index]->ts_offset),
4495  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4496  }
4497 
/* keep sub2video streams in sync with this stream's timestamps */
4498  sub2video_heartbeat(ist, pkt.pts);
4499 
4500  process_input_packet(ist, &pkt, 0);
4501 
4502 discard_packet:
4503  av_packet_unref(&pkt);
4504 
4505  return 0;
4506 }
4507 
4508 /**
4509  * Perform a step of transcoding for the specified filter graph.
4510  *
4511  * @param[in] graph filter graph to consider
4512  * @param[out] best_ist input stream where a frame would allow to continue
4513  * @return 0 for success, <0 for error
4514  */
4515 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4516 {
4517  int i, ret;
4518  int nb_requests, nb_requests_max = 0;
4519  InputFilter *ifilter;
4520  InputStream *ist;
4521 
4522  *best_ist = NULL;
4523  ret = avfilter_graph_request_oldest(graph->graph);
4524  if (ret >= 0)
4525  return reap_filters(0);
4526 
4527  if (ret == AVERROR_EOF) {
4528  ret = reap_filters(1);
4529  for (i = 0; i < graph->nb_outputs; i++)
4530  close_output_stream(graph->outputs[i]->ost);
4531  return ret;
4532  }
4533  if (ret != AVERROR(EAGAIN))
4534  return ret;
4535 
4536  for (i = 0; i < graph->nb_inputs; i++) {
4537  ifilter = graph->inputs[i];
4538  ist = ifilter->ist;
4539  if (input_files[ist->file_index]->eagain ||
4540  input_files[ist->file_index]->eof_reached)
4541  continue;
4542  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4543  if (nb_requests > nb_requests_max) {
4544  nb_requests_max = nb_requests;
4545  *best_ist = ist;
4546  }
4547  }
4548 
4549  if (!*best_ist)
4550  for (i = 0; i < graph->nb_outputs; i++)
4551  graph->outputs[i]->ost->unavailable = 1;
4552 
4553  return 0;
4554 }
4555 
4556 /**
4557  * Run a single step of transcoding.
4558  *
4559  * @return 0 for success, <0 for error
4560  */
4561 static int transcode_step(void)
4562 {
4563  OutputStream *ost;
4564  InputStream *ist = NULL;
4565  int ret;
4566 
4567  ost = choose_output();
4568  if (!ost) {
4569  if (got_eagain()) {
4570  reset_eagain();
4571  av_usleep(10000);
4572  return 0;
4573  }
4574  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4575  return AVERROR_EOF;
4576  }
4577 
4578  if (ost->filter && !ost->filter->graph->