FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
/* Symbolic variable names available in forced-keyframe expressions
   (NULL-terminated, order matters — it must match the values passed to
   the expression evaluator; presumably consumed by the -force_key_frames
   handling elsewhere in the file — confirm against the evaluator). */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
182  av_frame_unref(frame);
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276  sub2video_push_ref(ist, pts);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
/* Keep sub2video streams of the same input file "alive" while other
 * streams of that file are being read, so that filters consuming the
 * subtitle canvas (e.g. overlay) do not starve.
 *
 * @param ist the stream a frame was just read from
 * @param pts that frame's pts, in ist->st->time_base units
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;                      /* not a sub2video stream */
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* only re-push the previous canvas if some buffersrc actually
           failed a frame request since the last check */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
/* Async-signal-safe piece of terminal teardown: restore the tty
 * attributes saved by term_init(). tcsetattr() is on the POSIX list of
 * async-signal-safe functions, so this may run from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
/* On Linux, install handlers through sigaction() so that the
   caller-prepared 'struct sigaction action' (signal mask, SA_RESTART)
   is honored; the macro therefore expects 'action' to be in scope. */
#define SIGNAL(sig, func) \
    do { \
        action.sa_handler = func; \
        sigaction(sig, &action, NULL); \
    } while (0)
#else
/* Elsewhere, fall back to plain signal(), which offers no mask or
   flag control. */
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
/**
 * Poll for one keypress without blocking.
 *
 * @return the byte read if a key was available; otherwise -1 (on POSIX,
 *         the read() result — 0 on EOF, -1 on error — is returned when
 *         stdin was readable but no byte came back)
 */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* zero timeout: select() merely probes whether stdin is readable */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars != 0) {
            /* check read()'s result: the pipe may close between the peek
               and the read, in which case ch would be uninitialized */
            if (read(0, &ch, 1) == 1)
                return ch;
            return -1;
        } else {
            return -1;
        }
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
533  av_fifo_generic_read(ifilter->frame_queue, &frame,
534  sizeof(frame), NULL);
535  av_frame_free(&frame);
536  }
537  av_fifo_freep(&ifilter->frame_queue);
538  if (ist->sub2video.sub_queue) {
539  while (av_fifo_size(ist->sub2video.sub_queue)) {
540  AVSubtitle sub;
542  &sub, sizeof(sub), NULL);
543  avsubtitle_free(&sub);
544  }
546  }
547  av_buffer_unref(&ifilter->hw_frames_ctx);
548  av_freep(&ifilter->name);
549  av_freep(&fg->inputs[j]);
550  }
551  av_freep(&fg->inputs);
552  for (j = 0; j < fg->nb_outputs; j++) {
553  OutputFilter *ofilter = fg->outputs[j];
554 
555  avfilter_inout_free(&ofilter->out_tmp);
556  av_freep(&ofilter->name);
557  av_freep(&ofilter->formats);
558  av_freep(&ofilter->channel_layouts);
559  av_freep(&ofilter->sample_rates);
560  av_freep(&fg->outputs[j]);
561  }
562  av_freep(&fg->outputs);
563  av_freep(&fg->graph_desc);
564 
565  av_freep(&filtergraphs[i]);
566  }
567  av_freep(&filtergraphs);
568 
570 
571  /* close files */
572  for (i = 0; i < nb_output_files; i++) {
573  OutputFile *of = output_files[i];
575  if (!of)
576  continue;
577  s = of->ctx;
578  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
579  avio_closep(&s->pb);
581  av_dict_free(&of->opts);
582 
583  av_freep(&output_files[i]);
584  }
585  for (i = 0; i < nb_output_streams; i++) {
586  OutputStream *ost = output_streams[i];
587 
588  if (!ost)
589  continue;
590 
591  av_bsf_free(&ost->bsf_ctx);
592 
594  av_frame_free(&ost->last_frame);
595  av_dict_free(&ost->encoder_opts);
596 
597  av_freep(&ost->forced_keyframes);
599  av_freep(&ost->avfilter);
600  av_freep(&ost->logfile_prefix);
601 
603  ost->audio_channels_mapped = 0;
604 
605  av_dict_free(&ost->sws_dict);
606  av_dict_free(&ost->swr_opts);
607 
610 
611  if (ost->muxing_queue) {
612  while (av_fifo_size(ost->muxing_queue)) {
613  AVPacket pkt;
614  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
615  av_packet_unref(&pkt);
616  }
618  }
619 
620  av_freep(&output_streams[i]);
621  }
622 #if HAVE_THREADS
623  free_input_threads();
624 #endif
625  for (i = 0; i < nb_input_files; i++) {
626  avformat_close_input(&input_files[i]->ctx);
627  av_freep(&input_files[i]);
628  }
629  for (i = 0; i < nb_input_streams; i++) {
630  InputStream *ist = input_streams[i];
631 
634  av_dict_free(&ist->decoder_opts);
637  av_freep(&ist->filters);
638  av_freep(&ist->hwaccel_device);
639  av_freep(&ist->dts_buffer);
640 
642 
643  av_freep(&input_streams[i]);
644  }
645 
646  if (vstats_file) {
647  if (fclose(vstats_file))
649  "Error closing vstats file, loss of information possible: %s\n",
650  av_err2str(AVERROR(errno)));
651  }
653 
654  av_freep(&input_streams);
655  av_freep(&input_files);
656  av_freep(&output_streams);
657  av_freep(&output_files);
658 
659  uninit_opts();
660 
662 
663  if (received_sigterm) {
664  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
665  (int) received_sigterm);
666  } else if (ret && atomic_load(&transcode_init_done)) {
667  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
668  }
669  term_exit();
670  ffmpeg_exited = 1;
671 }
672 
674 {
675  AVDictionaryEntry *t = NULL;
676 
677  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
679  }
680 }
681 
683 {
685  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
686  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
687  exit_program(1);
688  }
689 }
690 
/* Terminate the program when an experimental codec was requested without
   the required opt-in. Both parameters are unused here; they keep call
   sites self-documenting (which codec, encoder vs decoder). */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
695 
696 static void update_benchmark(const char *fmt, ...)
697 {
698  if (do_benchmark_all) {
700  va_list va;
701  char buf[1024];
702 
703  if (fmt) {
704  va_start(va, fmt);
705  vsnprintf(buf, sizeof(buf), fmt, va);
706  va_end(va);
708  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
709  t.user_usec - current_time.user_usec,
710  t.sys_usec - current_time.sys_usec,
711  t.real_usec - current_time.real_usec, buf);
712  }
713  current_time = t;
714  }
715 }
716 
718 {
719  int i;
720  for (i = 0; i < nb_output_streams; i++) {
721  OutputStream *ost2 = output_streams[i];
722  ost2->finished |= ost == ost2 ? this_stream : others;
723  }
724 }
725 
726 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
727 {
728  AVFormatContext *s = of->ctx;
729  AVStream *st = ost->st;
730  int ret;
731 
732  /*
733  * Audio encoders may split the packets -- #frames in != #packets out.
734  * But there is no reordering, so we can limit the number of output packets
735  * by simply dropping them here.
736  * Counting encoded video frames needs to be done separately because of
737  * reordering, see do_video_out().
738  * Do not count the packet when unqueued because it has been counted when queued.
739  */
740  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
741  if (ost->frame_number >= ost->max_frames) {
742  av_packet_unref(pkt);
743  return;
744  }
745  ost->frame_number++;
746  }
747 
748  if (!of->header_written) {
749  AVPacket tmp_pkt = {0};
750  /* the muxer is not initialized yet, buffer the packet */
751  if (!av_fifo_space(ost->muxing_queue)) {
752  unsigned int are_we_over_size =
754  int new_size = are_we_over_size ?
755  FFMIN(2 * av_fifo_size(ost->muxing_queue),
756  ost->max_muxing_queue_size) :
757  2 * av_fifo_size(ost->muxing_queue);
758 
759  if (new_size <= av_fifo_size(ost->muxing_queue)) {
761  "Too many packets buffered for output stream %d:%d.\n",
762  ost->file_index, ost->st->index);
763  exit_program(1);
764  }
765  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
766  if (ret < 0)
767  exit_program(1);
768  }
769  ret = av_packet_make_refcounted(pkt);
770  if (ret < 0)
771  exit_program(1);
772  av_packet_move_ref(&tmp_pkt, pkt);
773  ost->muxing_queue_data_size += tmp_pkt.size;
774  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
775  return;
776  }
777 
780  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
781 
782  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
783  int i;
785  NULL);
786  ost->quality = sd ? AV_RL32(sd) : -1;
787  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
788 
789  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
790  if (sd && i < sd[5])
791  ost->error[i] = AV_RL64(sd + 8 + 8*i);
792  else
793  ost->error[i] = -1;
794  }
795 
796  if (ost->frame_rate.num && ost->is_cfr) {
797  if (pkt->duration > 0)
798  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
799  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
800  ost->mux_timebase);
801  }
802  }
803 
804  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
805 
806  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
807  if (pkt->dts != AV_NOPTS_VALUE &&
808  pkt->pts != AV_NOPTS_VALUE &&
809  pkt->dts > pkt->pts) {
810  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
811  pkt->dts, pkt->pts,
812  ost->file_index, ost->st->index);
813  pkt->pts =
814  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
815  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
816  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
817  }
819  pkt->dts != AV_NOPTS_VALUE &&
820  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
821  ost->last_mux_dts != AV_NOPTS_VALUE) {
822  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
823  if (pkt->dts < max) {
824  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
825  if (exit_on_error)
826  loglevel = AV_LOG_ERROR;
827  av_log(s, loglevel, "Non-monotonous DTS in output stream "
828  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
829  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
830  if (exit_on_error) {
831  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
832  exit_program(1);
833  }
834  av_log(s, loglevel, "changing to %"PRId64". This may result "
835  "in incorrect timestamps in the output file.\n",
836  max);
837  if (pkt->pts >= pkt->dts)
838  pkt->pts = FFMAX(pkt->pts, max);
839  pkt->dts = max;
840  }
841  }
842  }
843  ost->last_mux_dts = pkt->dts;
844 
845  ost->data_size += pkt->size;
846  ost->packets_written++;
847 
848  pkt->stream_index = ost->index;
849 
850  if (debug_ts) {
851  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
852  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
854  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
855  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
856  pkt->size
857  );
858  }
859 
860  ret = av_interleaved_write_frame(s, pkt);
861  if (ret < 0) {
862  print_error("av_interleaved_write_frame()", ret);
863  main_return_code = 1;
865  }
866  av_packet_unref(pkt);
867 }
868 
870 {
871  OutputFile *of = output_files[ost->file_index];
872 
873  ost->finished |= ENCODER_FINISHED;
874  if (of->shortest) {
875  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
876  of->recording_time = FFMIN(of->recording_time, end);
877  }
878 }
879 
880 /*
881  * Send a single packet to the output, applying any bitstream filters
882  * associated with the output stream. This may result in any number
883  * of packets actually being written, depending on what bitstream
884  * filters are applied. The supplied packet is consumed and will be
885  * blank (as if newly-allocated) when this function returns.
886  *
887  * If eof is set, instead indicate EOF to all bitstream filters and
888  * therefore flush any delayed packets to the output. A blank packet
889  * must be supplied in this case.
890  */
892  OutputStream *ost, int eof)
893 {
894  int ret = 0;
895 
896  /* apply the output bitstream filters */
897  if (ost->bsf_ctx) {
898  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
899  if (ret < 0)
900  goto finish;
901  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
902  write_packet(of, pkt, ost, 0);
903  if (ret == AVERROR(EAGAIN))
904  ret = 0;
905  } else if (!eof)
906  write_packet(of, pkt, ost, 0);
907 
908 finish:
909  if (ret < 0 && ret != AVERROR_EOF) {
910  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
911  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
912  if(exit_on_error)
913  exit_program(1);
914  }
915 }
916 
918 {
919  OutputFile *of = output_files[ost->file_index];
920 
921  if (of->recording_time != INT64_MAX &&
923  AV_TIME_BASE_Q) >= 0) {
924  close_output_stream(ost);
925  return 0;
926  }
927  return 1;
928 }
929 
931  AVFrame *frame)
932 {
933  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
934  AVCodecContext *enc = ost->enc_ctx;
935  if (!frame || frame->pts == AV_NOPTS_VALUE ||
936  !enc || !ost->filter || !ost->filter->graph->graph)
937  goto early_exit;
938 
939  {
941 
942  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
943  AVRational filter_tb = av_buffersink_get_time_base(filter);
944  AVRational tb = enc->time_base;
945  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
946 
947  tb.den <<= extra_bits;
948  float_pts =
949  av_rescale_q(frame->pts, filter_tb, tb) -
950  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
951  float_pts /= 1 << extra_bits;
952  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
953  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
954 
955  frame->pts =
956  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
957  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
958  }
959 
960 early_exit:
961 
962  if (debug_ts) {
963  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
964  frame ? av_ts2str(frame->pts) : "NULL",
965  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
966  float_pts,
967  enc ? enc->time_base.num : -1,
968  enc ? enc->time_base.den : -1);
969  }
970 
971  return float_pts;
972 }
973 
975  char *error, int error_len);
976 
978  unsigned int fatal)
979 {
980  int ret = AVERROR_BUG;
981  char error[1024] = {0};
982 
983  if (ost->initialized)
984  return 0;
985 
986  ret = init_output_stream(ost, frame, error, sizeof(error));
987  if (ret < 0) {
988  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
989  ost->file_index, ost->index, error);
990 
991  if (fatal)
992  exit_program(1);
993  }
994 
995  return ret;
996 }
997 
998 static void do_audio_out(OutputFile *of, OutputStream *ost,
999  AVFrame *frame)
1000 {
1001  AVCodecContext *enc = ost->enc_ctx;
1002  AVPacket pkt;
1003  int ret;
1004 
1005  av_init_packet(&pkt);
1006  pkt.data = NULL;
1007  pkt.size = 0;
1008 
1009  adjust_frame_pts_to_encoder_tb(of, ost, frame);
1010 
1011  if (!check_recording_time(ost))
1012  return;
1013 
1014  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1015  frame->pts = ost->sync_opts;
1016  ost->sync_opts = frame->pts + frame->nb_samples;
1017  ost->samples_encoded += frame->nb_samples;
1018  ost->frames_encoded++;
1019 
1020  av_assert0(pkt.size || !pkt.data);
1022  if (debug_ts) {
1023  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1024  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1025  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1026  enc->time_base.num, enc->time_base.den);
1027  }
1028 
1029  ret = avcodec_send_frame(enc, frame);
1030  if (ret < 0)
1031  goto error;
1032 
1033  while (1) {
1034  ret = avcodec_receive_packet(enc, &pkt);
1035  if (ret == AVERROR(EAGAIN))
1036  break;
1037  if (ret < 0)
1038  goto error;
1039 
1040  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1041 
1042  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1043 
1044  if (debug_ts) {
1045  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1046  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1047  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1048  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1049  }
1050 
1051  output_packet(of, &pkt, ost, 0);
1052  }
1053 
1054  return;
1055 error:
1056  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1057  exit_program(1);
1058 }
1059 
/* Encode one decoded subtitle and send the resulting packet(s) to the
 * muxer. DVB subtitles are encoded twice: pass 0 draws the subpicture,
 * pass 1 (with num_rects forced to 0) clears it again at end_display_time.
 * NOTE: 'sub' is modified in place (pts shifted, display times
 * normalized so start_display_time becomes 0). */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;  /* fixed 1 MiB encode buffer */
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared output buffer on first use */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second DVB pass: encode with no rects to produce the "clear" packet */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* restore the caller-visible rect count after the clear pass */
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1142 
/**
 * Encode one filtered video frame into the output stream, duplicating or
 * dropping frames as required by the active video sync method, and send the
 * resulting packets to the muxer via output_packet().
 *
 * NOTE(review): this is a doxygen extraction; several original source lines
 * were dropped by the export (gaps in the embedded line numbers). Gap
 * locations are marked below — confirm against the pristine ffmpeg.c.
 */
1143 static void do_video_out(OutputFile *of,
1144  OutputStream *ost,
1145  AVFrame *next_picture)
1146 {
1147  int ret, format_video_sync;
1148  AVPacket pkt;
1149  AVCodecContext *enc = ost->enc_ctx;
1150  AVRational frame_rate;
1151  int nb_frames, nb0_frames, i;
1152  double delta, delta0;
1153  double duration = 0;
1154  double sync_ipts = AV_NOPTS_VALUE;
1155  int frame_size = 0;
1156  InputStream *ist = NULL;
// NOTE(review): original line 1157 (presumably the declaration of 'filter',
// used below with av_buffersink_get_frame_rate) was dropped by extraction.
1158 
1159  init_output_stream_wrapper(ost, next_picture, 1);
1160  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1161 
1162  if (ost->source_index >= 0)
1163  ist = input_streams[ost->source_index];
1164 
// Derive the nominal frame duration (in encoder time-base units) from the
// buffersink's frame rate when it is known.
1165  frame_rate = av_buffersink_get_frame_rate(filter);
1166  if (frame_rate.num > 0 && frame_rate.den > 0)
1167  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1168 
1169  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1170  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1171 
// With no user filters and a trivial graph, prefer the input packet duration
// (converted to encoder time base) when it is positive.
1172  if (!ost->filters_script &&
1173  !ost->filters &&
1174  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1175  next_picture &&
1176  ist &&
1177  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1178  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1179  }
1180 
1181  if (!next_picture) {
1182  //end, flushing
// Use the median of the last three duplication counts to decide how many
// copies of the last frame to emit at EOF.
1183  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1184  ost->last_nb0_frames[1],
1185  ost->last_nb0_frames[2]);
1186  } else {
1187  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1188  delta = delta0 + duration;
1189 
1190  /* by default, we output a single frame */
1191  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1192  nb_frames = 1;
1193 
// Resolve VSYNC_AUTO to a concrete sync method based on the output format.
1194  format_video_sync = video_sync_method;
1195  if (format_video_sync == VSYNC_AUTO) {
1196  if(!strcmp(of->ctx->oformat->name, "avi")) {
1197  format_video_sync = VSYNC_VFR;
1198  } else
1199  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1200  if ( ist
1201  && format_video_sync == VSYNC_CFR
1202  && input_files[ist->file_index]->ctx->nb_streams == 1
1203  && input_files[ist->file_index]->input_ts_offset == 0) {
1204  format_video_sync = VSYNC_VSCFR;
1205  }
1206  if (format_video_sync == VSYNC_CFR && copy_ts) {
1207  format_video_sync = VSYNC_VSCFR;
1208  }
1209  }
1210  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1211 
// Small negative drift with positive total delta: clip the frame into place
// instead of dropping it.
1212  if (delta0 < 0 &&
1213  delta > 0 &&
1214  format_video_sync != VSYNC_PASSTHROUGH &&
1215  format_video_sync != VSYNC_DROP) {
1216  if (delta0 < -0.6) {
1217  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1218  } else
1219  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1220  sync_ipts = ost->sync_opts;
1221  duration += delta0;
1222  delta0 = 0;
1223  }
1224 
1225  switch (format_video_sync) {
1226  case VSYNC_VSCFR:
// VSCFR: do not duplicate frames to fill the gap before the first frame.
1227  if (ost->frame_number == 0 && delta0 >= 0.5) {
1228  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1229  delta = duration;
1230  delta0 = 0;
1231  ost->sync_opts = llrint(sync_ipts);
1232  }
// fallthrough into CFR handling is intentional
1233  case VSYNC_CFR:
1234  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1235  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1236  nb_frames = 0;
1237  } else if (delta < -1.1)
1238  nb_frames = 0;
1239  else if (delta > 1.1) {
1240  nb_frames = lrintf(delta);
1241  if (delta0 > 1.1)
1242  nb0_frames = llrintf(delta0 - 0.6);
1243  }
1244  break;
1245  case VSYNC_VFR:
1246  if (delta <= -0.6)
1247  nb_frames = 0;
1248  else if (delta > 0.6)
1249  ost->sync_opts = llrint(sync_ipts);
1250  break;
1251  case VSYNC_DROP:
1252  case VSYNC_PASSTHROUGH:
1253  ost->sync_opts = llrint(sync_ipts);
1254  break;
1255  default:
1256  av_assert0(0);
1257  }
1258  }
1259 
// Clamp against -frames limit, then record this iteration's duplication count.
1260  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1261  nb0_frames = FFMIN(nb0_frames, nb_frames);
1262 
1263  memmove(ost->last_nb0_frames + 1,
1264  ost->last_nb0_frames,
1265  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1266  ost->last_nb0_frames[0] = nb0_frames;
1267 
1268  if (nb0_frames == 0 && ost->last_dropped) {
1269  nb_frames_drop++;
// NOTE(review): original line 1270 (start of the av_log() call whose format
// string continues below) was dropped by extraction.
1271  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1272  ost->frame_number, ost->st->index, ost->last_frame->pts);
1273  }
1274  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1275  if (nb_frames > dts_error_threshold * 30) {
1276  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1277  nb_frames_drop++;
1278  return;
1279  }
1280  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1281  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1282  if (nb_frames_dup > dup_warning) {
1283  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1284  dup_warning *= 10;
1285  }
1286  }
1287  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1288 
1289  /* duplicates frame if needed */
1290  for (i = 0; i < nb_frames; i++) {
1291  AVFrame *in_picture;
1292  int forced_keyframe = 0;
1293  double pts_time;
1294  av_init_packet(&pkt);
1295  pkt.data = NULL;
1296  pkt.size = 0;
1297 
// The first nb0_frames iterations re-encode the previous frame (duplication).
1298  if (i < nb0_frames && ost->last_frame) {
1299  in_picture = ost->last_frame;
1300  } else
1301  in_picture = next_picture;
1302 
1303  if (!in_picture)
1304  return;
1305 
1306  in_picture->pts = ost->sync_opts;
1307 
1308  if (!check_recording_time(ost))
1309  return;
1310 
1311  in_picture->quality = enc->global_quality;
1312  in_picture->pict_type = 0;
1313 
1314  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1315  in_picture->pts != AV_NOPTS_VALUE)
1316  ost->forced_kf_ref_pts = in_picture->pts;
1317 
// Time (seconds) relative to the forced-keyframe reference pts; NAN when
// the frame has no pts.
1318  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1319  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1320  if (ost->forced_kf_index < ost->forced_kf_count &&
1321  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1322  ost->forced_kf_index++;
1323  forced_keyframe = 1;
1324  } else if (ost->forced_keyframes_pexpr) {
1325  double res;
1326  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
// NOTE(review): original lines 1327-1334 (expression evaluation via
// av_expr_eval and the remaining ff_dlog arguments) were dropped by
// extraction.
1329  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1335  res);
1336  if (res) {
1337  forced_keyframe = 1;
// NOTE(review): original lines 1338-1342 (updating the expression's
// prev_forced_n / prev_forced_t constants) were dropped by extraction.
1343  }
1344 
1346  } else if ( ost->forced_keyframes
1347  && !strncmp(ost->forced_keyframes, "source", 6)
1348  && in_picture->key_frame==1
1349  && !i) {
1350  forced_keyframe = 1;
1351  }
1352 
1353  if (forced_keyframe) {
1354  in_picture->pict_type = AV_PICTURE_TYPE_I;
1355  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1356  }
1357 
1359  if (debug_ts) {
1360  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1361  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1362  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1363  enc->time_base.num, enc->time_base.den);
1364  }
1365 
1366  ost->frames_encoded++;
1367 
1368  ret = avcodec_send_frame(enc, in_picture);
1369  if (ret < 0)
1370  goto error;
1371  // Make sure Closed Captions will not be duplicated
// NOTE(review): original line 1372 (the side-data removal implementing the
// comment above) was dropped by extraction.
1373 
// Drain all packets the encoder has ready; EAGAIN means it needs more input.
1374  while (1) {
1375  ret = avcodec_receive_packet(enc, &pkt);
1376  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377  if (ret == AVERROR(EAGAIN))
1378  break;
1379  if (ret < 0)
1380  goto error;
1381 
1382  if (debug_ts) {
1383  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1385  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1386  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1387  }
1388 
1389  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1390  pkt.pts = ost->sync_opts;
1391 
// Convert timestamps from encoder time base to the muxer's time base.
1392  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1393 
1394  if (debug_ts) {
1395  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1397  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1398  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1399  }
1400 
1401  frame_size = pkt.size;
1402  output_packet(of, &pkt, ost, 0);
1403 
1404  /* if two pass, output log */
1405  if (ost->logfile && enc->stats_out) {
1406  fprintf(ost->logfile, "%s", enc->stats_out);
1407  }
1408  }
1409  ost->sync_opts++;
1410  /*
1411  * For video, number of frames in == number of packets out.
1412  * But there may be reordering, so we can't throw away frames on encoder
1413  * flush, we need to limit them here, before they go into encoder.
1414  */
1415  ost->frame_number++;
1416 
1417  if (vstats_filename && frame_size)
1418  do_video_stats(ost, frame_size);
1419  }
1420 
// Keep a reference to the frame just consumed so it can be re-encoded for
// duplication on the next call; free the cache at EOF.
1421  if (!ost->last_frame)
1422  ost->last_frame = av_frame_alloc();
1423  av_frame_unref(ost->last_frame);
1424  if (next_picture && ost->last_frame)
1425  av_frame_ref(ost->last_frame, next_picture);
1426  else
1427  av_frame_free(&ost->last_frame);
1428 
1429  return;
1430 error:
1431  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1432  exit_program(1);
1433 }
1434 
1435 static double psnr(double d)
1436 {
1437  return -10.0 * log10(d);
1438 }
1439 
/*
 * Append one line of per-frame video statistics (quality, PSNR, size,
 * bitrates) for this output stream to the -vstats file, opening the file
 * lazily on first use.
 * NOTE(review): the function's signature line (original line 1440,
 * do_video_stats) was dropped by the doxygen extraction.
 */
1441 {
1442  AVCodecContext *enc;
1443  int frame_number;
1444  double ti1, bitrate, avg_bitrate;
1445 
1446  /* this is executed just the first time do_video_stats is called */
1447  if (!vstats_file) {
1448  vstats_file = fopen(vstats_filename, "w");
1449  if (!vstats_file) {
1450  perror("fopen");
1451  exit_program(1);
1452  }
1453  }
1454 
1455  enc = ost->enc_ctx;
1456  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1457  frame_number = ost->st->nb_frames;
// vstats_version 2 adds output-file and stream indices to each line.
1458  if (vstats_version <= 1) {
1459  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1460  ost->quality / (float)FF_QP2LAMBDA);
1461  } else {
1462  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1463  ost->quality / (float)FF_QP2LAMBDA);
1464  }
1465 
1466  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1467  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1468 
1469  fprintf(vstats_file,"f_size= %6d ", frame_size);
1470  /* compute pts value */
1471  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
// Clamp the elapsed time away from zero to avoid dividing by ~0 below.
1472  if (ti1 < 0.01)
1473  ti1 = 0.01;
1474 
1475  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1476  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1477  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1478  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1479  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1480  }
1481 }
1482 
/*
 * Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature line (original 1483, finish_output_stream) and
 * original line 1488 (presumably marking ost itself finished) were dropped
 * by the doxygen extraction.
 */
1484 {
1485  OutputFile *of = output_files[ost->file_index];
1486  int i;
1487 
1489 
1490  if (of->shortest) {
1491  for (i = 0; i < of->ctx->nb_streams; i++)
1492  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1493  }
1494 }
1495 
1496 /**
1497  * Get and encode new output from any of the filtergraphs, without causing
1498  * activity.
1499  *
1500  * @return 0 for success, <0 for severe errors
1501  */
1502 static int reap_filters(int flush)
1503 {
1504  AVFrame *filtered_frame = NULL;
1505  int i;
1506 
1507  /* Reap all buffers present in the buffer sinks */
1508  for (i = 0; i < nb_output_streams; i++) {
1509  OutputStream *ost = output_streams[i];
1510  OutputFile *of = output_files[ost->file_index];
// NOTE(review): original line 1511 (presumably the declaration of 'filter',
// assigned below) was dropped by the doxygen extraction.
1512  AVCodecContext *enc = ost->enc_ctx;
1513  int ret = 0;
1514 
// Skip streams whose filtergraph is not (yet) configured.
1515  if (!ost->filter || !ost->filter->graph->graph)
1516  continue;
1517  filter = ost->filter->filter;
1518 
1519  /*
1520  * Unlike video, with audio the audio frame size matters.
1521  * Currently we are fully reliant on the lavfi filter chain to
1522  * do the buffering deed for us, and thus the frame size parameter
1523  * needs to be set accordingly. Where does one get the required
1524  * frame size? From the initialized AVCodecContext of an audio
1525  * encoder. Thus, if we have gotten to an audio stream, initialize
1526  * the encoder earlier than receiving the first AVFrame.
1527  */
// NOTE(review): original lines 1528-1529 (the audio-stream encoder
// initialization implementing the comment above) were dropped by extraction.
1530 
// Lazily allocate the reusable frame that receives filtergraph output.
1531  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1532  return AVERROR(ENOMEM);
1533  }
1534  filtered_frame = ost->filtered_frame;
1535 
1536  while (1) {
1537  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
// NOTE(review): original line 1538 (the flags argument of this call) was
// dropped by extraction.
1539  if (ret < 0) {
1540  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
// NOTE(review): original line 1541 (start of the av_log call continued
// below) was dropped by extraction.
1542  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1543  } else if (flush && ret == AVERROR_EOF) {
// NOTE(review): original line 1544 (a media-type check guarding the flush
// call below) was dropped by extraction.
1545  do_video_out(of, ost, NULL);
1546  }
1547  break;
1548  }
1549  if (ost->finished) {
1550  av_frame_unref(filtered_frame);
1551  continue;
1552  }
1553 
1554  switch (av_buffersink_get_type(filter)) {
1555  case AVMEDIA_TYPE_VIDEO:
1556  if (!ost->frame_aspect_ratio.num)
1557  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1558 
1559  do_video_out(of, ost, filtered_frame);
1560  break;
1561  case AVMEDIA_TYPE_AUDIO:
1562  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1563  enc->channels != filtered_frame->channels) {
// NOTE(review): original line 1564 (start of the av_log call continued
// below) was dropped by extraction.
1565  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1566  break;
1567  }
1568  do_audio_out(of, ost, filtered_frame);
1569  break;
1570  default:
1571  // TODO support subtitle filters
1572  av_assert0(0);
1573  }
1574 
1575  av_frame_unref(filtered_frame);
1576  }
1577  }
1578 
1579  return 0;
1580 }
1581 
/*
 * Log the end-of-run summary: per-type encoded byte totals, muxing overhead,
 * and verbose per-input/per-output stream packet and frame counts.
 */
1582 static void print_final_stats(int64_t total_size)
1583 {
1584  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1585  uint64_t subtitle_size = 0;
1586  uint64_t data_size = 0;
1587  float percent = -1.0;
1588  int i, j;
1589  int pass1_used = 1;
1590 
// Accumulate encoded payload sizes per media type across all output streams.
1591  for (i = 0; i < nb_output_streams; i++) {
1592  OutputStream *ost = output_streams[i];
1593  switch (ost->enc_ctx->codec_type) {
1594  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1595  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1596  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1597  default: other_size += ost->data_size; break;
1598  }
1599  extra_size += ost->enc_ctx->extradata_size;
1600  data_size += ost->data_size;
// NOTE(review): original lines 1601-1602 (the two-pass flag check that
// clears pass1_used) were dropped by the doxygen extraction.
1603  pass1_used = 0;
1604  }
1605 
// Muxing overhead as a percentage of the raw encoded payload.
1606  if (data_size && total_size>0 && total_size >= data_size)
1607  percent = 100.0 * (total_size - data_size) / data_size;
1608 
1609  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1610  video_size / 1024.0,
1611  audio_size / 1024.0,
1612  subtitle_size / 1024.0,
1613  other_size / 1024.0,
1614  extra_size / 1024.0);
1615  if (percent >= 0.0)
1616  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1617  else
1618  av_log(NULL, AV_LOG_INFO, "unknown");
1619  av_log(NULL, AV_LOG_INFO, "\n");
1620 
1621  /* print verbose per-stream stats */
1622  for (i = 0; i < nb_input_files; i++) {
1623  InputFile *f = input_files[i];
1624  uint64_t total_packets = 0, total_size = 0;
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1627  i, f->ctx->url);
1628 
1629  for (j = 0; j < f->nb_streams; j++) {
1630  InputStream *ist = input_streams[f->ist_index + j];
1631  enum AVMediaType type = ist->dec_ctx->codec_type;
1632 
1633  total_size += ist->data_size;
1634  total_packets += ist->nb_packets;
1635 
1636  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1637  i, j, media_type_string(type));
1638  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1639  ist->nb_packets, ist->data_size);
1640 
1641  if (ist->decoding_needed) {
1642  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1643  ist->frames_decoded);
1644  if (type == AVMEDIA_TYPE_AUDIO)
1645  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1646  av_log(NULL, AV_LOG_VERBOSE, "; ");
1647  }
1648 
1649  av_log(NULL, AV_LOG_VERBOSE, "\n");
1650  }
1651 
1652  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1653  total_packets, total_size);
1654  }
1655 
1656  for (i = 0; i < nb_output_files; i++) {
1657  OutputFile *of = output_files[i];
1658  uint64_t total_packets = 0, total_size = 0;
1659 
1660  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1661  i, of->ctx->url);
1662 
1663  for (j = 0; j < of->ctx->nb_streams; j++) {
1664  OutputStream *ost = output_streams[of->ost_index + j];
1665  enum AVMediaType type = ost->enc_ctx->codec_type;
1666 
1667  total_size += ost->data_size;
1668  total_packets += ost->packets_written;
1669 
1670  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1671  i, j, media_type_string(type));
1672  if (ost->encoding_needed) {
1673  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1674  ost->frames_encoded);
1675  if (type == AVMEDIA_TYPE_AUDIO)
1676  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1677  av_log(NULL, AV_LOG_VERBOSE, "; ");
1678  }
1679 
1680  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1681  ost->packets_written, ost->data_size);
1682 
1683  av_log(NULL, AV_LOG_VERBOSE, "\n");
1684  }
1685 
1686  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1687  total_packets, total_size);
1688  }
// Warn when nothing at all was written; in pass 1 an empty output is normal.
1689  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1690  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1691  if (pass1_used) {
1692  av_log(NULL, AV_LOG_WARNING, "\n");
1693  } else {
1694  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1695  }
1696  }
1697 }
1698 
/*
 * Emit the periodic (or final) progress line to stderr/log and, when
 * -progress is active, the machine-readable key=value progress report.
 * Rate-limited by stats_period between calls unless is_last_report is set.
 */
1699 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1700 {
1701  AVBPrint buf, buf_script;
1702  OutputStream *ost;
1703  AVFormatContext *oc;
1704  int64_t total_size;
1705  AVCodecContext *enc;
1706  int frame_number, vid, i;
1707  double bitrate;
1708  double speed;
1709  int64_t pts = INT64_MIN + 1;
1710  static int64_t last_time = -1;
1711  static int first_report = 1;
1712  static int qp_histogram[52];
1713  int hours, mins, secs, us;
1714  const char *hours_sign;
1715  int ret;
1716  float t;
1717 
1718  if (!print_stats && !is_last_report && !progress_avio)
1719  return;
1720 
// Throttle intermediate reports to one per stats_period; the very first
// report waits until every output header has been written.
1721  if (!is_last_report) {
1722  if (last_time == -1) {
1723  last_time = cur_time;
1724  }
1725  if (((cur_time - last_time) < stats_period && !first_report) ||
1726  (first_report && nb_output_dumped < nb_output_files))
1727  return;
1728  last_time = cur_time;
1729  }
1730 
1731  t = (cur_time-timer_start) / 1000000.0;
1732 
1733 
1734  oc = output_files[0]->ctx;
1735 
1736  total_size = avio_size(oc->pb);
1737  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1738  total_size = avio_tell(oc->pb);
1739 
1740  vid = 0;
// NOTE(review): original line 1741 (presumably av_bprint_init(&buf, ...))
// was dropped by the doxygen extraction.
1742  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1743  for (i = 0; i < nb_output_streams; i++) {
1744  float q = -1;
1745  ost = output_streams[i];
1746  enc = ost->enc_ctx;
1747  if (!ost->stream_copy)
1748  q = ost->quality / (float) FF_QP2LAMBDA;
1749 
// Only the first video stream contributes the frame/fps/q fields; later
// video streams just append their q value.
1750  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1751  av_bprintf(&buf, "q=%2.1f ", q);
1752  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1753  ost->file_index, ost->index, q);
1754  }
1755  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1756  float fps;
1757 
1758  frame_number = ost->frame_number;
1759  fps = t > 1 ? frame_number / t : 0;
1760  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1761  frame_number, fps < 9.95, fps, q);
1762  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1763  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1764  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1765  ost->file_index, ost->index, q);
1766  if (is_last_report)
1767  av_bprintf(&buf, "L");
1768  if (qp_hist) {
1769  int j;
1770  int qp = lrintf(q);
1771  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1772  qp_histogram[qp]++;
1773  for (j = 0; j < 32; j++)
1774  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1775  }
1776 
1777  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1778  int j;
1779  double error, error_sum = 0;
1780  double scale, scale_sum = 0;
1781  double p;
1782  char type[3] = { 'Y','U','V' };
1783  av_bprintf(&buf, "PSNR=");
1784  for (j = 0; j < 3; j++) {
1785  if (is_last_report) {
1786  error = enc->error[j];
1787  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1788  } else {
1789  error = ost->error[j];
1790  scale = enc->width * enc->height * 255.0 * 255.0;
1791  }
// Chroma planes are subsampled, hence a quarter of the luma scale.
1792  if (j)
1793  scale /= 4;
1794  error_sum += error;
1795  scale_sum += scale;
1796  p = psnr(error / scale);
1797  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1798  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1799  ost->file_index, ost->index, type[j] | 32, p);
1800  }
1801  p = psnr(error_sum / scale_sum);
1802  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1803  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1804  ost->file_index, ost->index, p);
1805  }
1806  vid = 1;
1807  }
1808  /* compute min output value */
1809  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1810  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1811  ost->st->time_base, AV_TIME_BASE_Q));
1812  if (copy_ts) {
1813  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
// NOTE(review): original lines 1814-1815 (recording copy_ts_first_pts and
// the guard for the subtraction below) were dropped by extraction.
1816  pts -= copy_ts_first_pts;
1817  }
1818  }
1819 
1820  if (is_last_report)
1821  nb_frames_drop += ost->last_dropped;
1822  }
1823 
// Split the min end-pts into h:m:s.centiseconds for display.
1824  secs = FFABS(pts) / AV_TIME_BASE;
1825  us = FFABS(pts) % AV_TIME_BASE;
1826  mins = secs / 60;
1827  secs %= 60;
1828  hours = mins / 60;
1829  mins %= 60;
1830  hours_sign = (pts < 0) ? "-" : "";
1831 
1832  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1833  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1834 
1835  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1836  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1837  if (pts == AV_NOPTS_VALUE) {
1838  av_bprintf(&buf, "N/A ");
1839  } else {
1840  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1841  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1842  }
1843 
1844  if (bitrate < 0) {
1845  av_bprintf(&buf, "bitrate=N/A");
1846  av_bprintf(&buf_script, "bitrate=N/A\n");
1847  }else{
1848  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1849  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1850  }
1851 
1852  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1853  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1854  if (pts == AV_NOPTS_VALUE) {
1855  av_bprintf(&buf_script, "out_time_us=N/A\n");
1856  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1857  av_bprintf(&buf_script, "out_time=N/A\n");
1858  } else {
1859  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1860  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1861  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1862  hours_sign, hours, mins, secs, us);
1863  }
1864 
// NOTE(review): original line 1865 (presumably a guard such as
// "if (nb_frames_dup || nb_frames_drop)") was dropped by extraction.
1866  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1867  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1868  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1869 
1870  if (speed < 0) {
1871  av_bprintf(&buf, " speed=N/A");
1872  av_bprintf(&buf_script, "speed=N/A\n");
1873  } else {
1874  av_bprintf(&buf, " speed=%4.3gx", speed);
1875  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1876  }
1877 
// '\r' keeps intermediate reports on one console line; '\n' ends the run.
1878  if (print_stats || is_last_report) {
1879  const char end = is_last_report ? '\n' : '\r';
1880  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1881  fprintf(stderr, "%s %c", buf.str, end);
1882  } else
1883  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1884 
1885  fflush(stderr);
1886  }
1887  av_bprint_finalize(&buf, NULL);
1888 
1889  if (progress_avio) {
1890  av_bprintf(&buf_script, "progress=%s\n",
1891  is_last_report ? "end" : "continue");
1892  avio_write(progress_avio, buf_script.str,
1893  FFMIN(buf_script.len, buf_script.size - 1));
1894  avio_flush(progress_avio);
1895  av_bprint_finalize(&buf_script, NULL);
1896  if (is_last_report) {
1897  if ((ret = avio_closep(&progress_avio)) < 0)
// NOTE(review): original line 1898 (start of the av_log call continued
// below) was dropped by extraction.
1899  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1900  }
1901  }
1902 
1903  first_report = 0;
1904 
1905  if (is_last_report)
1906  print_final_stats(total_size);
1907 }
1908 
/*
 * Seed an input filter's media parameters from stream codec parameters when
 * no decoded frame ever arrived to supply them.
 * NOTE(review): the signature line (original 1909,
 * ifilter_parameters_from_codecpar) was dropped by the doxygen extraction.
 */
1910 {
1911  // We never got any input. Set a fake format, which will
1912  // come from libavformat.
1913  ifilter->format = par->format;
1914  ifilter->sample_rate = par->sample_rate;
1915  ifilter->channels = par->channels;
1916  ifilter->channel_layout = par->channel_layout;
1917  ifilter->width = par->width;
1918  ifilter->height = par->height;
1919  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1920 }
1921 
/*
 * Drain every active encoder at end of stream: signal EOF with
 * avcodec_send_frame(enc, NULL) and mux all remaining packets. Streams whose
 * encoder was never initialized get their filtergraph configured first so a
 * valid (empty) stream can still be written.
 */
1922 static void flush_encoders(void)
1923 {
1924  int i, ret;
1925 
1926  for (i = 0; i < nb_output_streams; i++) {
1927  OutputStream *ost = output_streams[i];
1928  AVCodecContext *enc = ost->enc_ctx;
1929  OutputFile *of = output_files[ost->file_index];
1930 
1931  if (!ost->encoding_needed)
1932  continue;
1933 
1934  // Try to enable encoding with no input frames.
1935  // Maybe we should just let encoding fail instead.
1936  if (!ost->initialized) {
1937  FilterGraph *fg = ost->filter->graph;
1938 
// NOTE(review): original line 1939 (start of the av_log call continued
// below) was dropped by the doxygen extraction.
1940  "Finishing stream %d:%d without any data written to it.\n",
1941  ost->file_index, ost->st->index);
1942 
1943  if (ost->filter && !fg->graph) {
1944  int x;
// Fall back to codecpar-derived parameters for inputs that never saw data.
1945  for (x = 0; x < fg->nb_inputs; x++) {
1946  InputFilter *ifilter = fg->inputs[x];
1947  if (ifilter->format < 0)
1948  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1949  }
1950 
// NOTE(review): original line 1951 (the condition guarding this 'continue',
// likely an ifilter_has_all_input_formats check) was dropped by extraction.
1952  continue;
1953 
1954  ret = configure_filtergraph(fg);
1955  if (ret < 0) {
1956  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1957  exit_program(1);
1958  }
1959 
1960  finish_output_stream(ost);
1961  }
1962 
// NOTE(review): original line 1963 (presumably the encoder init call for the
// uninitialized stream) was dropped by extraction.
1964  }
1965 
// NOTE(review): original lines 1966-1967 (the media-type guard for this
// 'continue', skipping non audio/video streams) were dropped by extraction.
1967  continue;
1968 
1969  for (;;) {
1970  const char *desc = NULL;
1971  AVPacket pkt;
1972  int pkt_size;
1973 
1974  switch (enc->codec_type) {
1975  case AVMEDIA_TYPE_AUDIO:
1976  desc = "audio";
1977  break;
1978  case AVMEDIA_TYPE_VIDEO:
1979  desc = "video";
1980  break;
1981  default:
1982  av_assert0(0);
1983  }
1984 
1985  av_init_packet(&pkt);
1986  pkt.data = NULL;
1987  pkt.size = 0;
1988 
// NOTE(review): original line 1989 (likely update_benchmark(NULL)) was
// dropped by extraction.
1990 
// Send the EOF frame (NULL) until the encoder stops reporting EAGAIN.
1991  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1992  ret = avcodec_send_frame(enc, NULL);
1993  if (ret < 0) {
1994  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1995  desc,
1996  av_err2str(ret));
1997  exit_program(1);
1998  }
1999  }
2000 
2001  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2002  if (ret < 0 && ret != AVERROR_EOF) {
2003  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2004  desc,
2005  av_err2str(ret));
2006  exit_program(1);
2007  }
2008  if (ost->logfile && enc->stats_out) {
2009  fprintf(ost->logfile, "%s", enc->stats_out);
2010  }
// EOF from the encoder: emit the terminating (empty) packet and stop.
2011  if (ret == AVERROR_EOF) {
2012  output_packet(of, &pkt, ost, 1);
2013  break;
2014  }
2015  if (ost->finished & MUXER_FINISHED) {
2016  av_packet_unref(&pkt);
2017  continue;
2018  }
2019  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
2020  pkt_size = pkt.size;
2021  output_packet(of, &pkt, ost, 0);
// NOTE(review): original line 2022 (the vstats/media-type guard for the
// do_video_stats call below) was dropped by extraction.
2023  do_video_stats(ost, pkt_size);
2024  }
2025  }
2026  }
2027 }
2028 
2029 /*
2030  * Check whether a packet from ist should be written into ost at this time
2031  */
/*
 * Return 1 if a packet from 'ist' should currently be written to 'ost':
 * the stream mapping must match, the output must not be finished, and the
 * input pts must have reached the output file's start time.
 * NOTE(review): the signature line (original 2032, check_output_constraints)
 * was dropped by the doxygen extraction.
 */
2033 {
2034  OutputFile *of = output_files[ost->file_index];
2035  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2036 
2037  if (ost->source_index != ist_index)
2038  return 0;
2039 
2040  if (ost->finished)
2041  return 0;
2042 
2043  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2044  return 0;
2045 
2046  return 1;
2047 }
2048 
/*
 * Copy one input packet to the output without re-encoding (-c copy):
 * rescale its timestamps into the muxer time base, honor start/recording
 * time limits, and forward it through output_packet(). A NULL 'pkt' flushes
 * the output bitstream filters.
 */
2049 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2050 {
2051  OutputFile *of = output_files[ost->file_index];
2052  InputFile *f = input_files [ist->file_index];
2053  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2054  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2055  AVPacket opkt;
2056 
2057  // EOF: flush output bitstream filters.
2058  if (!pkt) {
2059  av_init_packet(&opkt);
2060  opkt.data = NULL;
2061  opkt.size = 0;
2062  output_packet(of, &opkt, ost, 1);
2063  return;
2064  }
2065 
// Skip leading non-keyframes unless the user asked to keep them.
2066  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
// NOTE(review): original line 2067 (the second half of this condition,
// presumably !ost->copy_initial_nonkeyframes) was dropped by extraction.
2068  return;
2069 
2070  if (!ost->frame_number && !ost->copy_prior_start) {
2071  int64_t comp_start = start_time;
2072  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2073  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2074  if (pkt->pts == AV_NOPTS_VALUE ?
2075  ist->pts < comp_start :
2076  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2077  return;
2078  }
2079 
// Stop this stream once the -t recording limit of the output is reached.
2080  if (of->recording_time != INT64_MAX &&
2081  ist->pts >= of->recording_time + start_time) {
2082  close_output_stream(ost);
2083  return;
2084  }
2085 
2086  if (f->recording_time != INT64_MAX) {
2087  start_time = f->ctx->start_time;
2088  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2089  start_time += f->start_time;
2090  if (ist->pts >= f->recording_time + start_time) {
2091  close_output_stream(ost);
2092  return;
2093  }
2094  }
2095 
2096  /* force the input stream PTS */
2097  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2098  ost->sync_opts++;
2099 
2100  if (av_packet_ref(&opkt, pkt) < 0)
2101  exit_program(1);
2102 
2103  if (pkt->pts != AV_NOPTS_VALUE)
2104  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2105 
2106  if (pkt->dts == AV_NOPTS_VALUE) {
2107  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2108  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
// NOTE(review): original line 2109 (declaring 'duration', presumably from
// av_get_audio_frame_duration) was dropped by the doxygen extraction.
2110  if(!duration)
2111  duration = ist->dec_ctx->frame_size;
2112  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2113  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
// NOTE(review): original line 2114 (the remaining arguments of
// av_rescale_delta) was dropped by extraction.
2115  /* dts will be set immediately afterwards to what pts is now */
2116  opkt.pts = opkt.dts - ost_tb_start_time;
2117  } else
2118  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2119  opkt.dts -= ost_tb_start_time;
2120 
2121  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2122 
2123  output_packet(of, &opkt, ost, 0);
2124 }
2125 
/*
 * Fill in a missing decoder channel layout; returns 0 when no layout could
 * be determined (too many channels or no default), 1 otherwise. Logs a
 * warning when a layout was guessed rather than signalled.
 * NOTE(review): the signature line (original 2126, guess_input_channel_layout)
 * and original line 2135 (presumably assigning a default layout via
 * av_get_default_channel_layout) were dropped by the doxygen extraction.
 */
2127 {
2128  AVCodecContext *dec = ist->dec_ctx;
2129 
2130  if (!dec->channel_layout) {
2131  char layout_name[256];
2132 
2133  if (dec->channels > ist->guess_layout_max)
2134  return 0;
2136  if (!dec->channel_layout)
2137  return 0;
2138  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2139  dec->channels, dec->channel_layout);
2140  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2141  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2142  }
2143  return 1;
2144 }
2145 
/*
 * Update decode success/failure statistics and enforce -xerror policy:
 * a negative 'ret' (and, per the inner block, a corrupt decoded frame)
 * terminates the program when exit_on_error is set.
 */
2146 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2147 {
// Index 0 counts successful decodes, index 1 counts failures.
2148  if (*got_output || ret<0)
2149  decode_error_stat[ret<0] ++;
2150 
2151  if (ret < 0 && exit_on_error)
2152  exit_program(1);
2153 
2154  if (*got_output && ist) {
// NOTE(review): original lines 2155-2156 (the corrupt-frame condition and
// the start of the av_log call continued below) were dropped by the doxygen
// extraction.
2157  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2158  if (exit_on_error)
2159  exit_program(1);
2160  }
2161  }
2162 }
2163 
2164 // Filters can be configured only if the formats of all inputs are known.
2166 {
2167  int i;
2168  for (i = 0; i < fg->nb_inputs; i++) {
2169  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2170  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2171  return 0;
2172  }
2173  return 1;
2174 }
2175 
2176 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2177 {
2178  FilterGraph *fg = ifilter->graph;
2179  int need_reinit, ret, i;
2180 
2181  /* determine if the parameters for this input changed */
2182  need_reinit = ifilter->format != frame->format;
2183 
2184  switch (ifilter->ist->st->codecpar->codec_type) {
2185  case AVMEDIA_TYPE_AUDIO:
2186  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2187  ifilter->channels != frame->channels ||
2188  ifilter->channel_layout != frame->channel_layout;
2189  break;
2190  case AVMEDIA_TYPE_VIDEO:
2191  need_reinit |= ifilter->width != frame->width ||
2192  ifilter->height != frame->height;
2193  break;
2194  }
2195 
2196  if (!ifilter->ist->reinit_filters && fg->graph)
2197  need_reinit = 0;
2198 
2199  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2200  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2201  need_reinit = 1;
2202 
2203  if (need_reinit) {
2204  ret = ifilter_parameters_from_frame(ifilter, frame);
2205  if (ret < 0)
2206  return ret;
2207  }
2208 
2209  /* (re)init the graph if possible, otherwise buffer the frame and return */
2210  if (need_reinit || !fg->graph) {
2211  for (i = 0; i < fg->nb_inputs; i++) {
2212  if (!ifilter_has_all_input_formats(fg)) {
2213  AVFrame *tmp = av_frame_clone(frame);
2214  if (!tmp)
2215  return AVERROR(ENOMEM);
2216  av_frame_unref(frame);
2217 
2218  if (!av_fifo_space(ifilter->frame_queue)) {
2219  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2220  if (ret < 0) {
2221  av_frame_free(&tmp);
2222  return ret;
2223  }
2224  }
2225  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2226  return 0;
2227  }
2228  }
2229 
2230  ret = reap_filters(1);
2231  if (ret < 0 && ret != AVERROR_EOF) {
2232  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2233  return ret;
2234  }
2235 
2236  ret = configure_filtergraph(fg);
2237  if (ret < 0) {
2238  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2239  return ret;
2240  }
2241  }
2242 
2244  if (ret < 0) {
2245  if (ret != AVERROR_EOF)
2246  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2247  return ret;
2248  }
2249 
2250  return 0;
2251 }
2252 
2253 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2254 {
2255  int ret;
2256 
2257  ifilter->eof = 1;
2258 
2259  if (ifilter->filter) {
2260  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2261  if (ret < 0)
2262  return ret;
2263  } else {
2264  // the filtergraph was never configured
2265  if (ifilter->format < 0)
2266  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2267  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2268  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2269  return AVERROR_INVALIDDATA;
2270  }
2271  }
2272 
2273  return 0;
2274 }
2275 
2276 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2277 // There is the following difference: if you got a frame, you must call
2278 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2279 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2280 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2281 {
2282  int ret;
2283 
2284  *got_frame = 0;
2285 
2286  if (pkt) {
2287  ret = avcodec_send_packet(avctx, pkt);
2288  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2289  // decoded frames with avcodec_receive_frame() until done.
2290  if (ret < 0 && ret != AVERROR_EOF)
2291  return ret;
2292  }
2293 
2294  ret = avcodec_receive_frame(avctx, frame);
2295  if (ret < 0 && ret != AVERROR(EAGAIN))
2296  return ret;
2297  if (ret >= 0)
2298  *got_frame = 1;
2299 
2300  return 0;
2301 }
2302 
2304 {
2305  int i, ret;
2306  AVFrame *f;
2307 
2308  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2309  for (i = 0; i < ist->nb_filters; i++) {
2310  if (i < ist->nb_filters - 1) {
2311  f = ist->filter_frame;
2312  ret = av_frame_ref(f, decoded_frame);
2313  if (ret < 0)
2314  break;
2315  } else
2316  f = decoded_frame;
2317  ret = ifilter_send_frame(ist->filters[i], f);
2318  if (ret == AVERROR_EOF)
2319  ret = 0; /* ignore */
2320  if (ret < 0) {
2322  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2323  break;
2324  }
2325  }
2326  return ret;
2327 }
2328 
2330  int *decode_failed)
2331 {
2333  AVCodecContext *avctx = ist->dec_ctx;
2334  int ret, err = 0;
2335  AVRational decoded_frame_tb;
2336 
2337  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2338  return AVERROR(ENOMEM);
2339  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2340  return AVERROR(ENOMEM);
2341  decoded_frame = ist->decoded_frame;
2342 
2344  ret = decode(avctx, decoded_frame, got_output, pkt);
2345  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2346  if (ret < 0)
2347  *decode_failed = 1;
2348 
2349  if (ret >= 0 && avctx->sample_rate <= 0) {
2350  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2351  ret = AVERROR_INVALIDDATA;
2352  }
2353 
2354  if (ret != AVERROR_EOF)
2355  check_decode_result(ist, got_output, ret);
2356 
2357  if (!*got_output || ret < 0)
2358  return ret;
2359 
2360  ist->samples_decoded += decoded_frame->nb_samples;
2361  ist->frames_decoded++;
2362 
2363  /* increment next_dts to use for the case where the input stream does not
2364  have timestamps or there are multiple frames in the packet */
2365  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2366  avctx->sample_rate;
2367  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2368  avctx->sample_rate;
2369 
2370  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2371  decoded_frame_tb = ist->st->time_base;
2372  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2373  decoded_frame->pts = pkt->pts;
2374  decoded_frame_tb = ist->st->time_base;
2375  }else {
2376  decoded_frame->pts = ist->dts;
2377  decoded_frame_tb = AV_TIME_BASE_Q;
2378  }
2379  if (decoded_frame->pts != AV_NOPTS_VALUE)
2380  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2381  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2382  (AVRational){1, avctx->sample_rate});
2383  ist->nb_samples = decoded_frame->nb_samples;
2384  err = send_frame_to_filters(ist, decoded_frame);
2385 
2387  av_frame_unref(decoded_frame);
2388  return err < 0 ? err : ret;
2389 }
2390 
2391 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2392  int *decode_failed)
2393 {
2395  int i, ret = 0, err = 0;
2396  int64_t best_effort_timestamp;
2397  int64_t dts = AV_NOPTS_VALUE;
2398  AVPacket avpkt;
2399 
2400  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2401  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2402  // skip the packet.
2403  if (!eof && pkt && pkt->size == 0)
2404  return 0;
2405 
2406  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2407  return AVERROR(ENOMEM);
2408  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2409  return AVERROR(ENOMEM);
2410  decoded_frame = ist->decoded_frame;
2411  if (ist->dts != AV_NOPTS_VALUE)
2412  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2413  if (pkt) {
2414  avpkt = *pkt;
2415  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2416  }
2417 
2418  // The old code used to set dts on the drain packet, which does not work
2419  // with the new API anymore.
2420  if (eof) {
2421  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2422  if (!new)
2423  return AVERROR(ENOMEM);
2424  ist->dts_buffer = new;
2425  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2426  }
2427 
2429  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2430  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2431  if (ret < 0)
2432  *decode_failed = 1;
2433 
2434  // The following line may be required in some cases where there is no parser
2435  // or the parser does not has_b_frames correctly
2436  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2437  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2438  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2439  } else
2441  "video_delay is larger in decoder than demuxer %d > %d.\n"
2442  "If you want to help, upload a sample "
2443  "of this file to https://streams.videolan.org/upload/ "
2444  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2445  ist->dec_ctx->has_b_frames,
2446  ist->st->codecpar->video_delay);
2447  }
2448 
2449  if (ret != AVERROR_EOF)
2450  check_decode_result(ist, got_output, ret);
2451 
2452  if (*got_output && ret >= 0) {
2453  if (ist->dec_ctx->width != decoded_frame->width ||
2454  ist->dec_ctx->height != decoded_frame->height ||
2455  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2456  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2457  decoded_frame->width,
2458  decoded_frame->height,
2459  decoded_frame->format,
2460  ist->dec_ctx->width,
2461  ist->dec_ctx->height,
2462  ist->dec_ctx->pix_fmt);
2463  }
2464  }
2465 
2466  if (!*got_output || ret < 0)
2467  return ret;
2468 
2469  if(ist->top_field_first>=0)
2470  decoded_frame->top_field_first = ist->top_field_first;
2471 
2472  ist->frames_decoded++;
2473 
2474  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2475  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2476  if (err < 0)
2477  goto fail;
2478  }
2479  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2480 
2481  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2482  *duration_pts = decoded_frame->pkt_duration;
2483 
2484  if (ist->framerate.num)
2485  best_effort_timestamp = ist->cfr_next_pts++;
2486 
2487  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2488  best_effort_timestamp = ist->dts_buffer[0];
2489 
2490  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2491  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2492  ist->nb_dts_buffer--;
2493  }
2494 
2495  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2496  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2497 
2498  if (ts != AV_NOPTS_VALUE)
2499  ist->next_pts = ist->pts = ts;
2500  }
2501 
2502  if (debug_ts) {
2503  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2504  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2505  ist->st->index, av_ts2str(decoded_frame->pts),
2506  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2507  best_effort_timestamp,
2508  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2509  decoded_frame->key_frame, decoded_frame->pict_type,
2510  ist->st->time_base.num, ist->st->time_base.den);
2511  }
2512 
2513  if (ist->st->sample_aspect_ratio.num)
2514  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2515 
2516  err = send_frame_to_filters(ist, decoded_frame);
2517 
2518 fail:
2520  av_frame_unref(decoded_frame);
2521  return err < 0 ? err : ret;
2522 }
2523 
2525  int *decode_failed)
2526 {
2528  int free_sub = 1;
2529  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2530  &subtitle, got_output, pkt);
2531 
2532  check_decode_result(NULL, got_output, ret);
2533 
2534  if (ret < 0 || !*got_output) {
2535  *decode_failed = 1;
2536  if (!pkt->size)
2537  sub2video_flush(ist);
2538  return ret;
2539  }
2540 
2541  if (ist->fix_sub_duration) {
2542  int end = 1;
2543  if (ist->prev_sub.got_output) {
2544  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2545  1000, AV_TIME_BASE);
2546  if (end < ist->prev_sub.subtitle.end_display_time) {
2547  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2548  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2550  end <= 0 ? ", dropping it" : "");
2551  ist->prev_sub.subtitle.end_display_time = end;
2552  }
2553  }
2554  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2555  FFSWAP(int, ret, ist->prev_sub.ret);
2556  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2557  if (end <= 0)
2558  goto out;
2559  }
2560 
2561  if (!*got_output)
2562  return ret;
2563 
2564  if (ist->sub2video.frame) {
2565  sub2video_update(ist, INT64_MIN, &subtitle);
2566  } else if (ist->nb_filters) {
2567  if (!ist->sub2video.sub_queue)
2568  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2569  if (!ist->sub2video.sub_queue)
2570  exit_program(1);
2571  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2573  if (ret < 0)
2574  exit_program(1);
2575  }
2576  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2577  free_sub = 0;
2578  }
2579 
2580  if (!subtitle.num_rects)
2581  goto out;
2582 
2583  ist->frames_decoded++;
2584 
2585  for (i = 0; i < nb_output_streams; i++) {
2586  OutputStream *ost = output_streams[i];
2587 
2588  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2589  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2590  continue;
2591 
2592  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2593  }
2594 
2595 out:
2596  if (free_sub)
2597  avsubtitle_free(&subtitle);
2598  return ret;
2599 }
2600 
2602 {
2603  int i, ret;
2604  /* TODO keep pts also in stream time base to avoid converting back */
2605  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2607 
2608  for (i = 0; i < ist->nb_filters; i++) {
2609  ret = ifilter_send_eof(ist->filters[i], pts);
2610  if (ret < 0)
2611  return ret;
2612  }
2613  return 0;
2614 }
2615 
2616 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2617 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2618 {
2619  int ret = 0, i;
2620  int repeating = 0;
2621  int eof_reached = 0;
2622 
2623  AVPacket avpkt;
2624  if (!ist->saw_first_ts) {
2625  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2626  ist->pts = 0;
2627  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2628  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2629  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2630  }
2631  ist->saw_first_ts = 1;
2632  }
2633 
2634  if (ist->next_dts == AV_NOPTS_VALUE)
2635  ist->next_dts = ist->dts;
2636  if (ist->next_pts == AV_NOPTS_VALUE)
2637  ist->next_pts = ist->pts;
2638 
2639  if (!pkt) {
2640  /* EOF handling */
2641  av_init_packet(&avpkt);
2642  avpkt.data = NULL;
2643  avpkt.size = 0;
2644  } else {
2645  avpkt = *pkt;
2646  }
2647 
2648  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2649  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2650  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2651  ist->next_pts = ist->pts = ist->dts;
2652  }
2653 
2654  // while we have more to decode or while the decoder did output something on EOF
2655  while (ist->decoding_needed) {
2656  int64_t duration_dts = 0;
2657  int64_t duration_pts = 0;
2658  int got_output = 0;
2659  int decode_failed = 0;
2660 
2661  ist->pts = ist->next_pts;
2662  ist->dts = ist->next_dts;
2663 
2664  switch (ist->dec_ctx->codec_type) {
2665  case AVMEDIA_TYPE_AUDIO:
2666  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2667  &decode_failed);
2668  break;
2669  case AVMEDIA_TYPE_VIDEO:
2670  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2671  &decode_failed);
2672  if (!repeating || !pkt || got_output) {
2673  if (pkt && pkt->duration) {
2674  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2675  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2677  duration_dts = ((int64_t)AV_TIME_BASE *
2678  ist->dec_ctx->framerate.den * ticks) /
2680  }
2681 
2682  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2683  ist->next_dts += duration_dts;
2684  }else
2685  ist->next_dts = AV_NOPTS_VALUE;
2686  }
2687 
2688  if (got_output) {
2689  if (duration_pts > 0) {
2690  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2691  } else {
2692  ist->next_pts += duration_dts;
2693  }
2694  }
2695  break;
2696  case AVMEDIA_TYPE_SUBTITLE:
2697  if (repeating)
2698  break;
2699  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2700  if (!pkt && ret >= 0)
2701  ret = AVERROR_EOF;
2702  break;
2703  default:
2704  return -1;
2705  }
2706 
2707  if (ret == AVERROR_EOF) {
2708  eof_reached = 1;
2709  break;
2710  }
2711 
2712  if (ret < 0) {
2713  if (decode_failed) {
2714  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2715  ist->file_index, ist->st->index, av_err2str(ret));
2716  } else {
2717  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2718  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2719  }
2720  if (!decode_failed || exit_on_error)
2721  exit_program(1);
2722  break;
2723  }
2724 
2725  if (got_output)
2726  ist->got_output = 1;
2727 
2728  if (!got_output)
2729  break;
2730 
2731  // During draining, we might get multiple output frames in this loop.
2732  // ffmpeg.c does not drain the filter chain on configuration changes,
2733  // which means if we send multiple frames at once to the filters, and
2734  // one of those frames changes configuration, the buffered frames will
2735  // be lost. This can upset certain FATE tests.
2736  // Decode only 1 frame per call on EOF to appease these FATE tests.
2737  // The ideal solution would be to rewrite decoding to use the new
2738  // decoding API in a better way.
2739  if (!pkt)
2740  break;
2741 
2742  repeating = 1;
2743  }
2744 
2745  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2746  /* except when looping we need to flush but not to send an EOF */
2747  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2748  int ret = send_filter_eof(ist);
2749  if (ret < 0) {
2750  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2751  exit_program(1);
2752  }
2753  }
2754 
2755  /* handle stream copy */
2756  if (!ist->decoding_needed && pkt) {
2757  ist->dts = ist->next_dts;
2758  switch (ist->dec_ctx->codec_type) {
2759  case AVMEDIA_TYPE_AUDIO:
2760  av_assert1(pkt->duration >= 0);
2761  if (ist->dec_ctx->sample_rate) {
2762  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2763  ist->dec_ctx->sample_rate;
2764  } else {
2765  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2766  }
2767  break;
2768  case AVMEDIA_TYPE_VIDEO:
2769  if (ist->framerate.num) {
2770  // TODO: Remove work-around for c99-to-c89 issue 7
2771  AVRational time_base_q = AV_TIME_BASE_Q;
2772  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2773  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2774  } else if (pkt->duration) {
2775  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2776  } else if(ist->dec_ctx->framerate.num != 0) {
2777  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2778  ist->next_dts += ((int64_t)AV_TIME_BASE *
2779  ist->dec_ctx->framerate.den * ticks) /
2781  }
2782  break;
2783  }
2784  ist->pts = ist->dts;
2785  ist->next_pts = ist->next_dts;
2786  }
2787  for (i = 0; i < nb_output_streams; i++) {
2788  OutputStream *ost = output_streams[i];
2789 
2790  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2791  continue;
2792 
2793  do_streamcopy(ist, ost, pkt);
2794  }
2795 
2796  return !eof_reached;
2797 }
2798 
2799 static void print_sdp(void)
2800 {
2801  char sdp[16384];
2802  int i;
2803  int j;
2804  AVIOContext *sdp_pb;
2805  AVFormatContext **avc;
2806 
2807  for (i = 0; i < nb_output_files; i++) {
2808  if (!output_files[i]->header_written)
2809  return;
2810  }
2811 
2812  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2813  if (!avc)
2814  exit_program(1);
2815  for (i = 0, j = 0; i < nb_output_files; i++) {
2816  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2817  avc[j] = output_files[i]->ctx;
2818  j++;
2819  }
2820  }
2821 
2822  if (!j)
2823  goto fail;
2824 
2825  av_sdp_create(avc, j, sdp, sizeof(sdp));
2826 
2827  if (!sdp_filename) {
2828  printf("SDP:\n%s\n", sdp);
2829  fflush(stdout);
2830  } else {
2831  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2832  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2833  } else {
2834  avio_print(sdp_pb, sdp);
2835  avio_closep(&sdp_pb);
2837  }
2838  }
2839 
2840 fail:
2841  av_freep(&avc);
2842 }
2843 
2845 {
2846  InputStream *ist = s->opaque;
2847  const enum AVPixelFormat *p;
2848  int ret;
2849 
2850  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2852  const AVCodecHWConfig *config = NULL;
2853  int i;
2854 
2855  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2856  break;
2857 
2858  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2859  ist->hwaccel_id == HWACCEL_AUTO) {
2860  for (i = 0;; i++) {
2861  config = avcodec_get_hw_config(s->codec, i);
2862  if (!config)
2863  break;
2864  if (!(config->methods &
2866  continue;
2867  if (config->pix_fmt == *p)
2868  break;
2869  }
2870  }
2871  if (config) {
2872  if (config->device_type != ist->hwaccel_device_type) {
2873  // Different hwaccel offered, ignore.
2874  continue;
2875  }
2876 
2877  ret = hwaccel_decode_init(s);
2878  if (ret < 0) {
2879  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2881  "%s hwaccel requested for input stream #%d:%d, "
2882  "but cannot be initialized.\n",
2884  ist->file_index, ist->st->index);
2885  return AV_PIX_FMT_NONE;
2886  }
2887  continue;
2888  }
2889  } else {
2890  const HWAccel *hwaccel = NULL;
2891  int i;
2892  for (i = 0; hwaccels[i].name; i++) {
2893  if (hwaccels[i].pix_fmt == *p) {
2894  hwaccel = &hwaccels[i];
2895  break;
2896  }
2897  }
2898  if (!hwaccel) {
2899  // No hwaccel supporting this pixfmt.
2900  continue;
2901  }
2902  if (hwaccel->id != ist->hwaccel_id) {
2903  // Does not match requested hwaccel.
2904  continue;
2905  }
2906 
2907  ret = hwaccel->init(s);
2908  if (ret < 0) {
2910  "%s hwaccel requested for input stream #%d:%d, "
2911  "but cannot be initialized.\n", hwaccel->name,
2912  ist->file_index, ist->st->index);
2913  return AV_PIX_FMT_NONE;
2914  }
2915  }
2916 
2917  if (ist->hw_frames_ctx) {
2919  if (!s->hw_frames_ctx)
2920  return AV_PIX_FMT_NONE;
2921  }
2922 
2923  ist->hwaccel_pix_fmt = *p;
2924  break;
2925  }
2926 
2927  return *p;
2928 }
2929 
2930 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2931 {
2932  InputStream *ist = s->opaque;
2933 
2934  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2935  return ist->hwaccel_get_buffer(s, frame, flags);
2936 
2937  return avcodec_default_get_buffer2(s, frame, flags);
2938 }
2939 
2940 static int init_input_stream(int ist_index, char *error, int error_len)
2941 {
2942  int ret;
2943  InputStream *ist = input_streams[ist_index];
2944 
2945  if (ist->decoding_needed) {
2946  AVCodec *codec = ist->dec;
2947  if (!codec) {
2948  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2949  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2950  return AVERROR(EINVAL);
2951  }
2952 
2953  ist->dec_ctx->opaque = ist;
2954  ist->dec_ctx->get_format = get_format;
2955  ist->dec_ctx->get_buffer2 = get_buffer;
2956 #if LIBAVCODEC_VERSION_MAJOR < 60
2957  ist->dec_ctx->thread_safe_callbacks = 1;
2958 #endif
2959 
2960  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2961  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2962  (ist->decoding_needed & DECODING_FOR_OST)) {
2963  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2965  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2966  }
2967 
2968  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2969 
2970  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2971  * audio, and video decoders such as cuvid or mediacodec */
2972  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2973 
2974  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2975  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2976  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2978  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2979 
2980  ret = hw_device_setup_for_decode(ist);
2981  if (ret < 0) {
2982  snprintf(error, error_len, "Device setup failed for "
2983  "decoder on input stream #%d:%d : %s",
2984  ist->file_index, ist->st->index, av_err2str(ret));
2985  return ret;
2986  }
2987 
2988  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2989  if (ret == AVERROR_EXPERIMENTAL)
2990  abort_codec_experimental(codec, 0);
2991 
2992  snprintf(error, error_len,
2993  "Error while opening decoder for input stream "
2994  "#%d:%d : %s",
2995  ist->file_index, ist->st->index, av_err2str(ret));
2996  return ret;
2997  }
2999  }
3000 
3001  ist->next_pts = AV_NOPTS_VALUE;
3002  ist->next_dts = AV_NOPTS_VALUE;
3003 
3004  return 0;
3005 }
3006 
3008 {
3009  if (ost->source_index >= 0)
3010  return input_streams[ost->source_index];
3011  return NULL;
3012 }
3013 
/* qsort comparator for int64_t values: returns -1, 0 or 1 (the expansion of
 * FFDIFFSIGN, written out explicitly to avoid overflow from subtraction). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;

    return (x > y) - (x < y);
}
3018 
3019 /* open the muxer when all the streams are initialized */
3021 {
3022  int ret, i;
3023 
3024  for (i = 0; i < of->ctx->nb_streams; i++) {
3025  OutputStream *ost = output_streams[of->ost_index + i];
3026  if (!ost->initialized)
3027  return 0;
3028  }
3029 
3030  of->ctx->interrupt_callback = int_cb;
3031 
3032  ret = avformat_write_header(of->ctx, &of->opts);
3033  if (ret < 0) {
3035  "Could not write header for output file #%d "
3036  "(incorrect codec parameters ?): %s\n",
3037  file_index, av_err2str(ret));
3038  return ret;
3039  }
3040  //assert_avoptions(of->opts);
3041  of->header_written = 1;
3042 
3043  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3044  nb_output_dumped++;
3045 
3046  if (sdp_filename || want_sdp)
3047  print_sdp();
3048 
3049  /* flush the muxing queues */
3050  for (i = 0; i < of->ctx->nb_streams; i++) {
3051  OutputStream *ost = output_streams[of->ost_index + i];
3052 
3053  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3054  if (!av_fifo_size(ost->muxing_queue))
3055  ost->mux_timebase = ost->st->time_base;
3056 
3057  while (av_fifo_size(ost->muxing_queue)) {
3058  AVPacket pkt;
3059  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3060  ost->muxing_queue_data_size -= pkt.size;
3061  write_packet(of, &pkt, ost, 1);
3062  }
3063  }
3064 
3065  return 0;
3066 }
3067 
3069 {
3070  AVBSFContext *ctx = ost->bsf_ctx;
3071  int ret;
3072 
3073  if (!ctx)
3074  return 0;
3075 
3076  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3077  if (ret < 0)
3078  return ret;
3079 
3080  ctx->time_base_in = ost->st->time_base;
3081 
3082  ret = av_bsf_init(ctx);
3083  if (ret < 0) {
3084  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3085  ctx->filter->name);
3086  return ret;
3087  }
3088 
3089  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3090  if (ret < 0)
3091  return ret;
3092  ost->st->time_base = ctx->time_base_out;
3093 
3094  return 0;
3095 }
3096 
3098 {
3099  OutputFile *of = output_files[ost->file_index];
3100  InputStream *ist = get_input_stream(ost);
3101  AVCodecParameters *par_dst = ost->st->codecpar;
3102  AVCodecParameters *par_src = ost->ref_par;
3103  AVRational sar;
3104  int i, ret;
3105  uint32_t codec_tag = par_dst->codec_tag;
3106 
3107  av_assert0(ist && !ost->filter);
3108 
3109  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3110  if (ret >= 0)
3111  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3112  if (ret < 0) {
3114  "Error setting up codec context options.\n");
3115  return ret;
3116  }
3117 
3118  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3119  if (ret < 0) {
3121  "Error getting reference codec parameters.\n");
3122  return ret;
3123  }
3124 
3125  if (!codec_tag) {
3126  unsigned int codec_tag_tmp;
3127  if (!of->ctx->oformat->codec_tag ||
3128  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3129  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3130  codec_tag = par_src->codec_tag;
3131  }
3132 
3133  ret = avcodec_parameters_copy(par_dst, par_src);
3134  if (ret < 0)
3135  return ret;
3136 
3137  par_dst->codec_tag = codec_tag;
3138 
3139  if (!ost->frame_rate.num)
3140  ost->frame_rate = ist->framerate;
3141  ost->st->avg_frame_rate = ost->frame_rate;
3142 
3144  if (ret < 0)
3145  return ret;
3146 
3147  // copy timebase while removing common factors
3148  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3150 
3151  // copy estimated duration as a hint to the muxer
3152  if (ost->st->duration <= 0 && ist->st->duration > 0)
3153  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3154 
3155  // copy disposition
3156  ost->st->disposition = ist->st->disposition;
3157 
3158  if (ist->st->nb_side_data) {
3159  for (i = 0; i < ist->st->nb_side_data; i++) {
3160  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3161  uint8_t *dst_data;
3162 
3163  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3164  if (!dst_data)
3165  return AVERROR(ENOMEM);
3166  memcpy(dst_data, sd_src->data, sd_src->size);
3167  }
3168  }
3169 
3170  if (ost->rotate_overridden) {
3172  sizeof(int32_t) * 9);
3173  if (sd)
3175  }
3176 
3177  switch (par_dst->codec_type) {
3178  case AVMEDIA_TYPE_AUDIO:
3179  if (audio_volume != 256) {
3180  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3181  exit_program(1);
3182  }
3183  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3184  par_dst->block_align= 0;
3185  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3186  par_dst->block_align= 0;
3187  break;
3188  case AVMEDIA_TYPE_VIDEO:
3189  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3190  sar =
3192  (AVRational){ par_dst->height, par_dst->width });
3193  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3194  "with stream copy may produce invalid files\n");
3195  }
3196  else if (ist->st->sample_aspect_ratio.num)
3197  sar = ist->st->sample_aspect_ratio;
3198  else
3199  sar = par_src->sample_aspect_ratio;
3200  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3201  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3202  ost->st->r_frame_rate = ist->st->r_frame_rate;
3203  break;
3204  }
3205 
3206  ost->mux_timebase = ist->st->time_base;
3207 
3208  return 0;
3209 }
3210 
/*
 * Write an "encoder" metadata tag on the output stream identifying libavcodec
 * and the encoder in use, unless the user already supplied one or bitexact
 * output was requested (in which case only "Lavc <name>" is written, keeping
 * output reproducible).
 *
 * NOTE(review): this chunk is a doxygen extraction that dropped hyperlinked
 * lines. The signature line is missing here -- presumably
 * "static void set_encoder_id(OutputFile *of, OutputStream *ost)"; confirm
 * against upstream ffmpeg.c.
 */
3212 {
3213  AVDictionaryEntry *e;
3214 
3215  uint8_t *encoder_string;
3216  int encoder_string_len;
3217  int format_flags = 0;
3218  int codec_flags = ost->enc_ctx->flags;
3219 
 /* A user-provided encoder tag always wins; do not overwrite it. */
3220  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3221  return;
3222 
 /* Evaluate -fflags from the muxer options to detect +bitexact. */
3223  e = av_dict_get(of->opts, "fflags", NULL, 0);
3224  if (e) {
3225  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3226  if (!o)
3227  return;
3228  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3229  }
 /* Likewise evaluate the encoder's -flags option string. */
3230  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3231  if (e) {
3232  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3233  if (!o)
3234  return;
3235  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3236  }
3237 
 /* sizeof(LIBAVCODEC_IDENT) already counts the NUL; +2 covers the
  * separating space and the encoder name's terminator. */
3238  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3239  encoder_string = av_mallocz(encoder_string_len);
3240  if (!encoder_string)
3241  exit_program(1);
3242 
 /* Full ident only when neither format nor codec asked for bitexact. */
3243  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3244  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3245  else
3246  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3247  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
 /* NOTE(review): the flag argument on the next call (line 3249, presumably
  * AV_DICT_DONT_STRDUP_VAL) was dropped by the extraction -- confirm. */
3248  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3250 }
3251 
/*
 * Parse the -force_key_frames time list for one output stream.
 *
 * kf is a comma-separated list of timestamps; the special token
 * "chapters[+offset]" expands to one entry per chapter of the output file,
 * shifted by the optional offset. The parsed times are rescaled to the
 * encoder time base, sorted, and stored in ost->forced_kf_pts/count.
 * Exits the program on allocation failure; kf is modified in place
 * (commas are overwritten with NULs).
 *
 * NOTE(review): doxygen extraction dropped at least one line (3284,
 * presumably the av_log(NULL, AV_LOG_FATAL, ...) call head) -- confirm
 * against upstream ffmpeg.c.
 */
3252 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3253  AVCodecContext *avctx)
3254 {
3255  char *p;
3256  int n = 1, i, size, index = 0;
3257  int64_t t, *pts;
3258 
 /* Count entries: one more than the number of commas. */
3259  for (p = kf; *p; p++)
3260  if (*p == ',')
3261  n++;
3262  size = n;
3263  pts = av_malloc_array(size, sizeof(*pts));
3264  if (!pts) {
3265  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3266  exit_program(1);
3267  }
3268 
3269  p = kf;
3270  for (i = 0; i < n; i++) {
3271  char *next = strchr(p, ',');
3272 
3273  if (next)
3274  *next++ = 0;
3275 
 /* "chapters..." expands to one keyframe per output chapter. */
3276  if (!memcmp(p, "chapters", 8)) {
3277 
3278  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3279  int j;
3280 
 /* Grow the array: this one token contributes nb_chapters entries
  * instead of 1, guarding the size addition against overflow. */
3281  if (avf->nb_chapters > INT_MAX - size ||
3282  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3283  sizeof(*pts)))) {
3285  "Could not allocate forced key frames array.\n");
3286  exit_program(1);
3287  }
 /* Optional "+offset" after the literal "chapters". */
3288  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3289  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3290 
3291  for (j = 0; j < avf->nb_chapters; j++) {
3292  AVChapter *c = avf->chapters[j];
3293  av_assert1(index < size);
3294  pts[index++] = av_rescale_q(c->start, c->time_base,
3295  avctx->time_base) + t;
3296  }
3297 
3298  } else {
3299 
 /* Plain timestamp entry. */
3300  t = parse_time_or_die("force_key_frames", p, 1);
3301  av_assert1(index < size);
3302  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3303 
3304  }
3305 
3306  p = next;
3307  }
3308 
3309  av_assert0(index == size);
3310  qsort(pts, size, sizeof(*pts), compare_int64);
3311  ost->forced_kf_count = size;
3312  ost->forced_kf_pts = pts;
3313 }
3314 
3315 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3316 {
3317  InputStream *ist = get_input_stream(ost);
3318  AVCodecContext *enc_ctx = ost->enc_ctx;
3319  AVFormatContext *oc;
3320 
3321  if (ost->enc_timebase.num > 0) {
3322  enc_ctx->time_base = ost->enc_timebase;
3323  return;
3324  }
3325 
3326  if (ost->enc_timebase.num < 0) {
3327  if (ist) {
3328  enc_ctx->time_base = ist->st->time_base;
3329  return;
3330  }
3331 
3332  oc = output_files[ost->file_index]->ctx;
3333  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3334  }
3335 
3336  enc_ctx->time_base = default_time_base;
3337 }
3338 
/*
 * Configure the encoder context of an output stream that will be encoded:
 * picks frame rate, time base, dimensions, pixel/sample parameters from the
 * filter graph sink and (when available) the decoder context, and parses any
 * -force_key_frames specification.
 *
 * NOTE(review): doxygen extraction dropped many hyperlinked lines here
 * (the signature line -- presumably
 * "static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)"
 * -- the dec_ctx declaration, several av_log/av_buffersink calls and the
 * forced-keyframes expression setup). Consult upstream ffmpeg.c for the
 * full text before editing.
 */
3340 {
3341  InputStream *ist = get_input_stream(ost);
3342  AVCodecContext *enc_ctx = ost->enc_ctx;
3344  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3345  int j, ret;
3346 
3347  set_encoder_id(output_files[ost->file_index], ost);
3348 
3349  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3350  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3351  // which have to be filtered out to prevent leaking them to output files.
3352  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3353 
3354  if (ist) {
3355  ost->st->disposition = ist->st->disposition;
3356 
3357  dec_ctx = ist->dec_ctx;
3358 
3359  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3360  } else {
 /* No input stream: if this is the only stream of its media type in the
  * file, give it the default disposition. */
3361  for (j = 0; j < oc->nb_streams; j++) {
3362  AVStream *st = oc->streams[j];
3363  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3364  break;
3365  }
3366  if (j == oc->nb_streams)
3367  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3370  }
3371 
3372  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 /* Frame rate selection order: -r option, filter graph, input -framerate,
  * input r_frame_rate, then a last-resort 25 fps default. */
3373  if (!ost->frame_rate.num)
3375  if (ist && !ost->frame_rate.num)
3376  ost->frame_rate = ist->framerate;
3377  if (ist && !ost->frame_rate.num)
3378  ost->frame_rate = ist->st->r_frame_rate;
3379  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3380  ost->frame_rate = (AVRational){25, 1};
3382  "No information "
3383  "about the input framerate is available. Falling "
3384  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3385  "if you want a different framerate.\n",
3386  ost->file_index, ost->index);
3387  }
3388 
 /* Clamp to -fpsmax (also used when the rate is degenerate, den == 0). */
3389  if (ost->max_frame_rate.num &&
3390  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
3391  !ost->frame_rate.den))
3392  ost->frame_rate = ost->max_frame_rate;
3393 
 /* Snap to the nearest rate the encoder supports unless -force_fps. */
3394  if (ost->enc->supported_framerates && !ost->force_fps) {
3395  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3396  ost->frame_rate = ost->enc->supported_framerates[idx];
3397  }
3398  // reduce frame rate for mpeg4 to be within the spec limits
3399  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3400  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3401  ost->frame_rate.num, ost->frame_rate.den, 65535);
3402  }
3403  }
3404 
3405  switch (enc_ctx->codec_type) {
3406  case AVMEDIA_TYPE_AUDIO:
 /* Preserve the source bit depth, capped at the sample format's width. */
3408  if (dec_ctx)
3409  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3410  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3414 
3415  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3416  break;
3417 
3418  case AVMEDIA_TYPE_VIDEO:
3420 
3421  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
 /* Warn about very fine time bases with muxers that handle them poorly. */
3423  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3425  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3426  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3427  }
3428 
 /* Geometry and pixel format come from the buffersink of the filter graph. */
3429  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3430  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3431  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3432  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3433  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3435 
3436  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3437  if (dec_ctx)
3438  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3439  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3440 
 /* Carry color metadata from the first frame when one is available. */
3441  if (frame) {
3442  enc_ctx->color_range = frame->color_range;
3443  enc_ctx->color_primaries = frame->color_primaries;
3444  enc_ctx->color_trc = frame->color_trc;
3445  enc_ctx->colorspace = frame->colorspace;
3446  enc_ctx->chroma_sample_location = frame->chroma_location;
3447  }
3448 
3449  enc_ctx->framerate = ost->frame_rate;
3450 
3451  ost->st->avg_frame_rate = ost->frame_rate;
3452 
3453  if (!dec_ctx ||
3454  enc_ctx->width != dec_ctx->width ||
3455  enc_ctx->height != dec_ctx->height ||
3456  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3458  }
3459 
 /* -top 0/1 forces the field order; -1 leaves it to the frame data below. */
3460  if (ost->top_field_first == 0) {
3461  enc_ctx->field_order = AV_FIELD_BB;
3462  } else if (ost->top_field_first == 1) {
3463  enc_ctx->field_order = AV_FIELD_TT;
3464  }
3465 
3466  if (frame) {
3468  ost->top_field_first >= 0)
3469  frame->top_field_first = !!ost->top_field_first;
3470 
3471  if (frame->interlaced_frame) {
3472  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3473  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3474  else
3475  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3476  } else
3477  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3478  }
3479 
3480  if (ost->forced_keyframes) {
3481  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
 /* NOTE(review): the av_expr_parse() call that sets ret was dropped
  * by the extraction (lines 3482-3483) -- confirm upstream. */
3484  if (ret < 0) {
3486  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3487  return ret;
3488  }
3493 
3494  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3495  // parse it only for static kf timings
3496  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3498  }
3499  }
3500  break;
3501  case AVMEDIA_TYPE_SUBTITLE:
3502  enc_ctx->time_base = AV_TIME_BASE_Q;
 /* Fall back to the input video dimensions for bitmap subtitles. */
3503  if (!enc_ctx->width) {
3504  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3505  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3506  }
3507  break;
3508  case AVMEDIA_TYPE_DATA:
3509  break;
3510  default:
3511  abort();
3512  break;
3513  }
3514 
3515  ost->mux_timebase = enc_ctx->time_base;
3516 
3517  return 0;
3518 }
3519 
/*
 * Fully initialize one output stream: for encoded streams this configures
 * and opens the encoder (via init_output_stream_encode), copies global side
 * data and time base/duration hints; for streamcopy it delegates to
 * init_output_stream_streamcopy. Afterwards it applies any user-supplied
 * -disposition flags, sets up bitstream filters, and may trigger writing
 * the output file header. On error fills 'error' and returns a negative
 * AVERROR code.
 *
 * NOTE(review): doxygen extraction dropped several hyperlinked lines
 * (subtitle_header allocation, buffersink frame-size/type calls,
 * avcodec_parameters_from_context) -- confirm against upstream ffmpeg.c.
 */
3520 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3521  char *error, int error_len)
3522 {
3523  int ret = 0;
3524 
3525  if (ost->encoding_needed) {
3526  AVCodec *codec = ost->enc;
3527  AVCodecContext *dec = NULL;
3528  InputStream *ist;
3529 
3530  ret = init_output_stream_encode(ost, frame);
3531  if (ret < 0)
3532  return ret;
3533 
3534  if ((ist = get_input_stream(ost)))
3535  dec = ist->dec_ctx;
 /* Pass the decoder's ASS subtitle header through to the encoder. */
3536  if (dec && dec->subtitle_header) {
3537  /* ASS code assumes this buffer is null terminated so add extra byte. */
3539  if (!ost->enc_ctx->subtitle_header)
3540  return AVERROR(ENOMEM);
3541  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3543  }
 /* Sensible encoder defaults when the user gave none. */
3544  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3545  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3546  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3547  !codec->defaults &&
3548  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3549  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3550  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3551 
3552  ret = hw_device_setup_for_encode(ost);
3553  if (ret < 0) {
3554  snprintf(error, error_len, "Device setup failed for "
3555  "encoder on output stream #%d:%d : %s",
3556  ost->file_index, ost->index, av_err2str(ret));
3557  return ret;
3558  }
3559 
 /* Subtitle transcoding only works within a class (text->text or
  * bitmap->bitmap); reject mixed conversions up front. */
3560  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3561  int input_props = 0, output_props = 0;
3562  AVCodecDescriptor const *input_descriptor =
3564  AVCodecDescriptor const *output_descriptor =
3566  if (input_descriptor)
3567  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3568  if (output_descriptor)
3569  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3570  if (input_props && output_props && input_props != output_props) {
3571  snprintf(error, error_len,
3572  "Subtitle encoding currently only possible from text to text "
3573  "or bitmap to bitmap");
3574  return AVERROR_INVALIDDATA;
3575  }
3576  }
3577 
3578  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3579  if (ret == AVERROR_EXPERIMENTAL)
3580  abort_codec_experimental(codec, 1);
3581  snprintf(error, error_len,
3582  "Error while opening encoder for output stream #%d:%d - "
3583  "maybe incorrect parameters such as bit_rate, rate, width or height",
3584  ost->file_index, ost->index);
3585  return ret;
3586  }
 /* Fixed-frame-size audio encoders need the buffersink configured to
  * emit exactly frame_size samples per frame.
  * NOTE(review): the av_buffersink_set_frame_size() call head (line 3589)
  * was dropped by the extraction. */
3587  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3588  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3590  ost->enc_ctx->frame_size);
3592  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3593  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3594  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3595  " It takes bits/s as argument, not kbits/s\n");
3596 
 /* NOTE(review): the avcodec_parameters_from_context() call setting ret
  * (line 3597) was dropped by the extraction. */
3598  if (ret < 0) {
3600  "Error initializing the output stream codec context.\n");
3601  exit_program(1);
3602  }
3603 
 /* Copy encoder-produced coded side data onto the output stream. */
3604  if (ost->enc_ctx->nb_coded_side_data) {
3605  int i;
3606 
3607  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3608  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3609  uint8_t *dst_data;
3610 
3611  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3612  if (!dst_data)
3613  return AVERROR(ENOMEM);
3614  memcpy(dst_data, sd_src->data, sd_src->size);
3615  }
3616  }
3617 
3618  /*
3619  * Add global input side data. For now this is naive, and copies it
3620  * from the input stream's global side data. All side data should
3621  * really be funneled over AVFrame and libavfilter, then added back to
3622  * packet side data, and then potentially using the first packet for
3623  * global side data.
3624  */
3625  if (ist) {
3626  int i;
3627  for (i = 0; i < ist->st->nb_side_data; i++) {
3628  AVPacketSideData *sd = &ist->st->side_data[i];
3629  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3630  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3631  if (!dst)
3632  return AVERROR(ENOMEM);
3633  memcpy(dst, sd->data, sd->size);
 /* autorotate applied the rotation in the filter chain, so reset
  * the display matrix to 0 degrees on the output. */
3634  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3635  av_display_rotation_set((uint32_t *)dst, 0);
3636  }
3637  }
3638  }
3639 
3640  // copy timebase while removing common factors
3641  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3642  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3643 
3644  // copy estimated duration as a hint to the muxer
3645  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3646  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3647  } else if (ost->stream_copy) {
3648  ret = init_output_stream_streamcopy(ost);
3649  if (ret < 0)
3650  return ret;
3651  }
3652 
3653  // parse user provided disposition, and update stream values
3654  if (ost->disposition) {
3655  static const AVOption opts[] = {
3656  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3657  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3658  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3659  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3660  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3661  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3662  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3663  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3664  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3665  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3666  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3667  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3668  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3669  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3670  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3671  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3672  { NULL },
3673  };
 /* Local throwaway AVClass so av_opt_eval_flags can parse the string. */
3674  static const AVClass class = {
3675  .class_name = "",
3676  .item_name = av_default_item_name,
3677  .option = opts,
3678  .version = LIBAVUTIL_VERSION_INT,
3679  };
3680  const AVClass *pclass = &class;
3681 
3682  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3683  if (ret < 0)
3684  return ret;
3685  }
3686 
3687  /* initialize bitstream filters for the output stream
3688  * needs to be done here, because the codec id for streamcopy is not
3689  * known until now */
3690  ret = init_output_bsfs(ost);
3691  if (ret < 0)
3692  return ret;
3693 
3694  ost->initialized = 1;
3695 
 /* Writing the header may now be possible if all streams are ready. */
3696  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3697  if (ret < 0)
3698  return ret;
3699 
3700  return ret;
3701 }
3702 
/*
 * Warn (once per stream index) when a packet arrives for a stream that
 * appeared after avformat_find_stream_info() -- such streams are not mapped
 * and will be ignored.
 *
 * NOTE(review): extraction dropped line 3712 (presumably the
 * av_get_media_type_string(...) argument filling the first %s) -- confirm
 * against upstream ffmpeg.c.
 */
3703 static void report_new_stream(int input_index, AVPacket *pkt)
3704 {
3705  InputFile *file = input_files[input_index];
3706  AVStream *st = file->ctx->streams[pkt->stream_index];
3707 
 /* Already warned for this (or a later) stream index. */
3708  if (pkt->stream_index < file->nb_streams_warn)
3709  return;
3710  av_log(file->ctx, AV_LOG_WARNING,
3711  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3713  input_index, pkt->stream_index,
3714  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
 /* Raise the watermark so this stream is only reported once. */
3715  file->nb_streams_warn = pkt->stream_index + 1;
3716 }
3717 
/*
 * One-time setup before the main transcode loop: binds complex-filtergraph
 * outputs to a source input stream, initializes frame-rate emulation,
 * opens all decoders, initializes streamcopy/subtitle/data output streams
 * (encoded audio/video streams are initialized lazily on the first frame),
 * discards unused programs, writes headers for stream-less outputs, and
 * dumps the stream mapping. Returns 0 on success or a negative AVERROR.
 *
 * NOTE(review): extraction dropped lines 3893-3894 (presumably
 * atomic_store(&transcode_init_done, 1);) -- confirm upstream.
 */
3718 static int transcode_init(void)
3719 {
3720  int ret = 0, i, j, k;
3721  AVFormatContext *oc;
3722  OutputStream *ost;
3723  InputStream *ist;
3724  char error[1024] = {0};
3725 
 /* Give complex-filtergraph outputs a nominal source_index (the last
  * input stream feeding a single-input graph) for reporting purposes. */
3726  for (i = 0; i < nb_filtergraphs; i++) {
3727  FilterGraph *fg = filtergraphs[i];
3728  for (j = 0; j < fg->nb_outputs; j++) {
3729  OutputFilter *ofilter = fg->outputs[j];
3730  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3731  continue;
3732  if (fg->nb_inputs != 1)
3733  continue;
3734  for (k = nb_input_streams-1; k >= 0 ; k--)
3735  if (fg->inputs[0]->ist == input_streams[k])
3736  break;
3737  ofilter->ost->source_index = k;
3738  }
3739  }
3740 
3741  /* init framerate emulation */
3742  for (i = 0; i < nb_input_files; i++) {
3743  InputFile *ifile = input_files[i];
3744  if (ifile->rate_emu)
3745  for (j = 0; j < ifile->nb_streams; j++)
3746  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3747  }
3748 
3749  /* init input streams */
3750  for (i = 0; i < nb_input_streams; i++)
3751  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* On failure, close any encoders before reporting. */
3752  for (i = 0; i < nb_output_streams; i++) {
3753  ost = output_streams[i];
3754  avcodec_close(ost->enc_ctx);
3755  }
3756  goto dump_format;
3757  }
3758 
3759  /*
3760  * initialize stream copy and subtitle/data streams.
3761  * Encoded AVFrame based streams will get initialized as follows:
3762  * - when the first AVFrame is received in do_video_out
3763  * - just before the first AVFrame is received in either transcode_step
3764  * or reap_filters due to us requiring the filter chain buffer sink
3765  * to be configured with the correct audio frame size, which is only
3766  * known after the encoder is initialized.
3767  */
3768  for (i = 0; i < nb_output_streams; i++) {
3769  if (!output_streams[i]->stream_copy &&
3770  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3771  output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3772  continue;
3773 
3774  ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3775  if (ret < 0)
3776  goto dump_format;
3777  }
3778 
3779  /* discard unused programs */
3780  for (i = 0; i < nb_input_files; i++) {
3781  InputFile *ifile = input_files[i];
3782  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3783  AVProgram *p = ifile->ctx->programs[j];
3784  int discard = AVDISCARD_ALL;
3785 
 /* Keep the program if any of its streams is used. */
3786  for (k = 0; k < p->nb_stream_indexes; k++)
3787  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3788  discard = AVDISCARD_DEFAULT;
3789  break;
3790  }
3791  p->discard = discard;
3792  }
3793  }
3794 
3795  /* write headers for files with no streams */
3796  for (i = 0; i < nb_output_files; i++) {
3797  oc = output_files[i]->ctx;
3798  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3799  ret = check_init_output_file(output_files[i], i);
3800  if (ret < 0)
3801  goto dump_format;
3802  }
3803  }
3804 
3805  dump_format:
3806  /* dump the stream mapping */
3807  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3808  for (i = 0; i < nb_input_streams; i++) {
3809  ist = input_streams[i];
3810 
3811  for (j = 0; j < ist->nb_filters; j++) {
3812  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3813  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3814  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3815  ist->filters[j]->name);
3816  if (nb_filtergraphs > 1)
3817  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3818  av_log(NULL, AV_LOG_INFO, "\n");
3819  }
3820  }
3821  }
3822 
3823  for (i = 0; i < nb_output_streams; i++) {
3824  ost = output_streams[i];
3825 
3826  if (ost->attachment_filename) {
3827  /* an attached file */
3828  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3829  ost->attachment_filename, ost->file_index, ost->index);
3830  continue;
3831  }
3832 
3833  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3834  /* output from a complex graph */
3835  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3836  if (nb_filtergraphs > 1)
3837  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3838 
3839  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3840  ost->index, ost->enc ? ost->enc->name : "?");
3841  continue;
3842  }
3843 
3844  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3845  input_streams[ost->source_index]->file_index,
3846  input_streams[ost->source_index]->st->index,
3847  ost->file_index,
3848  ost->index);
3849  if (ost->sync_ist != input_streams[ost->source_index])
3850  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3851  ost->sync_ist->file_index,
3852  ost->sync_ist->st->index);
3853  if (ost->stream_copy)
3854  av_log(NULL, AV_LOG_INFO, " (copy)");
3855  else {
 /* Report "native" when the codec implementation name matches the
  * codec's descriptor name (i.e. no external decoder/encoder). */
3856  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3857  const AVCodec *out_codec = ost->enc;
3858  const char *decoder_name = "?";
3859  const char *in_codec_name = "?";
3860  const char *encoder_name = "?";
3861  const char *out_codec_name = "?";
3862  const AVCodecDescriptor *desc;
3863 
3864  if (in_codec) {
3865  decoder_name = in_codec->name;
3866  desc = avcodec_descriptor_get(in_codec->id);
3867  if (desc)
3868  in_codec_name = desc->name;
3869  if (!strcmp(decoder_name, in_codec_name))
3870  decoder_name = "native";
3871  }
3872 
3873  if (out_codec) {
3874  encoder_name = out_codec->name;
3875  desc = avcodec_descriptor_get(out_codec->id);
3876  if (desc)
3877  out_codec_name = desc->name;
3878  if (!strcmp(encoder_name, out_codec_name))
3879  encoder_name = "native";
3880  }
3881 
3882  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3883  in_codec_name, decoder_name,
3884  out_codec_name, encoder_name);
3885  }
3886  av_log(NULL, AV_LOG_INFO, "\n");
3887  }
3888 
3889  if (ret) {
3890  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3891  return ret;
3892  }
3893 
3895 
3896  return 0;
3897 }
3898 
3899 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3900 static int need_output(void)
3901 {
3902  int i;
3903 
3904  for (i = 0; i < nb_output_streams; i++) {
3905  OutputStream *ost = output_streams[i];
3906  OutputFile *of = output_files[ost->file_index];
3907  AVFormatContext *os = output_files[ost->file_index]->ctx;
3908 
3909  if (ost->finished ||
3910  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3911  continue;
3912  if (ost->frame_number >= ost->max_frames) {
3913  int j;
3914  for (j = 0; j < of->ctx->nb_streams; j++)
3915  close_output_stream(output_streams[of->ost_index + j]);
3916  continue;
3917  }
3918 
3919  return 1;
3920  }
3921 
3922  return 0;
3923 }
3924 
3925 /**
3926  * Select the output stream to process.
3927  *
3928  * @return selected output stream, or NULL if none available
3929  */
3931 {
3932  int i;
3933  int64_t opts_min = INT64_MAX;
3934  OutputStream *ost_min = NULL;
3935 
3936  for (i = 0; i < nb_output_streams; i++) {
3937  OutputStream *ost = output_streams[i];
3938  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3939  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3940  AV_TIME_BASE_Q);
3941  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3943  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3944  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3945 
3946  if (!ost->initialized && !ost->inputs_done)
3947  return ost;
3948 
3949  if (!ost->finished && opts < opts_min) {
3950  opts_min = opts;
3951  ost_min = ost->unavailable ? NULL : ost;
3952  }
3953  }
3954  return ost_min;
3955 }
3956 
/* Turn terminal echo on stdin on or off; a no-op on platforms
 * without termios support. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    /* silently ignore failures (stdin may not be a tty) */
    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3968 
/*
 * Poll the keyboard (at most every 100ms) and act on interactive commands:
 * 'q' quits, '+'/'-' change verbosity, 's' toggles the QP histogram,
 * 'h' cycles packet/hex dumping, 'c'/'C' send commands to filters,
 * 'd'/'D' set codec debug flags, '?' prints help.
 * Returns AVERROR_EXIT when a quit was requested (or a signal was received),
 * 0 otherwise.
 *
 * NOTE(review): doxygen extraction dropped lines 3993 and 4030 (log-call
 * heads) -- confirm against upstream ffmpeg.c.
 */
3969 static int check_keyboard_interaction(int64_t cur_time)
3970 {
3971  int i, ret, key;
3972  static int64_t last_time;
3973  if (received_nb_signals)
3974  return AVERROR_EXIT;
3975  /* read_key() returns 0 on EOF */
 /* Rate-limit polling to once per 100ms; never read keys as a daemon. */
3976  if(cur_time - last_time >= 100000 && !run_as_daemon){
3977  key = read_key();
3978  last_time = cur_time;
3979  }else
3980  key = -1;
3981  if (key == 'q')
3982  return AVERROR_EXIT;
3983  if (key == '+') av_log_set_level(av_log_get_level()+10);
3984  if (key == '-') av_log_set_level(av_log_get_level()-10);
3985  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off. */
3986  if (key == 'h'){
3987  if (do_hex_dump){
3988  do_hex_dump = do_pkt_dump = 0;
3989  } else if(do_pkt_dump){
3990  do_hex_dump = 1;
3991  } else
3992  do_pkt_dump = 1;
3994  }
 /* 'c' sends to the first matching filter, 'C' to all / queued. */
3995  if (key == 'c' || key == 'C'){
3996  char buf[4096], target[64], command[256], arg[256] = {0};
3997  double time;
3998  int k, n = 0;
3999  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4000  i = 0;
4001  set_tty_echo(1);
 /* Read one line of user input, echoing while typing. */
4002  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4003  if (k > 0)
4004  buf[i++] = k;
4005  buf[i] = 0;
4006  set_tty_echo(0);
4007  fprintf(stderr, "\n");
4008  if (k > 0 &&
4009  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4010  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4011  target, time, command, arg);
4012  for (i = 0; i < nb_filtergraphs; i++) {
4013  FilterGraph *fg = filtergraphs[i];
4014  if (fg->graph) {
 /* time < 0 means "execute now"; otherwise queue for later. */
4015  if (time < 0) {
4016  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4017  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4018  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4019  } else if (key == 'c') {
4020  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4021  ret = AVERROR_PATCHWELCOME;
4022  } else {
4023  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4024  if (ret < 0)
4025  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4026  }
4027  }
4028  }
4029  } else {
4031  "Parse error, at least 3 arguments were expected, "
4032  "only %d given in string '%s'\n", n, buf);
4033  }
4034  }
 /* 'D' cycles the debug flags of the first decoder; 'd' reads a value. */
4035  if (key == 'd' || key == 'D'){
4036  int debug=0;
4037  if(key == 'D') {
4038  debug = input_streams[0]->dec_ctx->debug << 1;
4039  if(!debug) debug = 1;
4040  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4041  debug += debug;
4042  }else{
4043  char buf[32];
4044  int k = 0;
4045  i = 0;
4046  set_tty_echo(1);
4047  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4048  if (k > 0)
4049  buf[i++] = k;
4050  buf[i] = 0;
4051  set_tty_echo(0);
4052  fprintf(stderr, "\n");
4053  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4054  fprintf(stderr,"error parsing debug value\n");
4055  }
 /* Apply the chosen debug flags to all decoders and encoders. */
4056  for(i=0;i<nb_input_streams;i++) {
4057  input_streams[i]->dec_ctx->debug = debug;
4058  }
4059  for(i=0;i<nb_output_streams;i++) {
4060  OutputStream *ost = output_streams[i];
4061  ost->enc_ctx->debug = debug;
4062  }
4063  if(debug) av_log_set_level(AV_LOG_DEBUG);
4064  fprintf(stderr,"debug=%d\n", debug);
4065  }
4066  if (key == '?'){
4067  fprintf(stderr, "key function\n"
4068  "? show this help\n"
4069  "+ increase verbosity\n"
4070  "- decrease verbosity\n"
4071  "c Send command to first matching filter supporting it\n"
4072  "C Send/Queue command to all matching filters\n"
4073  "D cycle through available debug modes\n"
4074  "h dump packets/hex press to cycle through the 3 states\n"
4075  "q quit\n"
4076  "s Show QP histogram\n"
4077  );
4078  }
4079  return 0;
4080 }
4081 
4082 #if HAVE_THREADS
/*
 * Per-input-file reader thread: pulls packets with av_read_frame() and
 * pushes them into the file's thread message queue until EOF or error.
 * On error the code is propagated to the receiving side via
 * av_thread_message_queue_set_err_recv().
 *
 * NOTE(review): extraction dropped line 4105 (presumably the
 * av_log(f->ctx, AV_LOG_WARNING, ...) call head for the queue-blocking
 * warning) -- confirm against upstream ffmpeg.c.
 */
4083 static void *input_thread(void *arg)
4084 {
4085  InputFile *f = arg;
 /* Non-blocking sends only while the queue hasn't filled up once. */
4086  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4087  int ret = 0;
4088 
4089  while (1) {
4090  AVPacket pkt;
4091  ret = av_read_frame(f->ctx, &pkt);
4092 
4093  if (ret == AVERROR(EAGAIN)) {
4094  av_usleep(10000);
4095  continue;
4096  }
4097  if (ret < 0) {
4098  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4099  break;
4100  }
4101  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* Queue full in non-blocking mode: warn once, then fall back to
  * blocking sends for the rest of this input. */
4102  if (flags && ret == AVERROR(EAGAIN)) {
4103  flags = 0;
4104  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4106  "Thread message queue blocking; consider raising the "
4107  "thread_queue_size option (current value: %d)\n",
4108  f->thread_queue_size);
4109  }
4110  if (ret < 0) {
4111  if (ret != AVERROR_EOF)
4112  av_log(f->ctx, AV_LOG_ERROR,
4113  "Unable to send packet to main thread: %s\n",
4114  av_err2str(ret));
4115  av_packet_unref(&pkt);
4116  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4117  break;
4118  }
4119  }
4120 
4121  return NULL;
4122 }
4123 
/*
 * Stop and join the reader thread of input file i, draining and unreffing
 * any packets still queued, then free the message queue.
 *
 * NOTE(review): extraction dropped line 4131 (presumably
 * av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
 * which unblocks the sender) -- confirm against upstream ffmpeg.c.
 */
4124 static void free_input_thread(int i)
4125 {
4126  InputFile *f = input_files[i];
4127  AVPacket pkt;
4128 
4129  if (!f || !f->in_thread_queue)
4130  return;
 /* Drain queued packets so their buffers are released. */
4132  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4133  av_packet_unref(&pkt);
4134 
4135  pthread_join(f->thread, NULL);
4136  f->joined = 1;
4137  av_thread_message_queue_free(&f->in_thread_queue);
4138 }
4139 
4140 static void free_input_threads(void)
4141 {
4142  int i;
4143 
4144  for (i = 0; i < nb_input_files; i++)
4145  free_input_thread(i);
4146 }
4147 
4148 static int init_input_thread(int i)
4149 {
4150  int ret;
4151  InputFile *f = input_files[i];
4152 
4153  if (f->thread_queue_size < 0)
4154  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4155  if (!f->thread_queue_size)
4156  return 0;
4157 
4158  if (f->ctx->pb ? !f->ctx->pb->seekable :
4159  strcmp(f->ctx->iformat->name, "lavfi"))
4160  f->non_blocking = 1;
4161  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4162  f->thread_queue_size, sizeof(AVPacket));
4163  if (ret < 0)
4164  return ret;
4165 
4166  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4167  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4168  av_thread_message_queue_free(&f->in_thread_queue);
4169  return AVERROR(ret);
4170  }
4171 
4172  return 0;
4173 }
4174 
4175 static int init_input_threads(void)
4176 {
4177  int i, ret;
4178 
4179  for (i = 0; i < nb_input_files; i++) {
4180  ret = init_input_thread(i);
4181  if (ret < 0)
4182  return ret;
4183  }
4184  return 0;
4185 }
4186 
/* Fetch the next packet from the file's reader-thread queue.
 * Returns what av_thread_message_queue_recv() returns (0 on success,
 * negative AVERROR on empty/non-blocking or EOF/error). */
4187 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4188 {
4189  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4190  f->non_blocking ?
/* NOTE(review): original line 4191 was lost in extraction; upstream the
 * ternary continues "AV_THREAD_MESSAGE_NONBLOCK : 0);" — confirm against
 * the original file. */
4192 }
4193 #endif
4194 
/* NOTE(review): the signature line (original line 4195) was lost in
 * extraction; this is presumably
 *     static int get_input_packet(InputFile *f, AVPacket *pkt)
 * — confirm against the original file.
 *
 * Reads the next packet from input file f, either via the reader-thread
 * queue (when one exists) or directly with av_read_frame(). */
4196 {
/* Rate emulation: compare the stream's last DTS (rescaled to microseconds)
 * against elapsed wall-clock time and report EAGAIN while the input is
 * ahead of real time, throttling reads to the stream's own rate. */
4197  if (f->rate_emu) {
4198  int i;
4199  for (i = 0; i < f->nb_streams; i++) {
4200  InputStream *ist = input_streams[f->ist_index + i];
4201  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4202  int64_t now = av_gettime_relative() - ist->start;
4203  if (pts > now)
4204  return AVERROR(EAGAIN);
4205  }
4206  }
4207 
4208 #if HAVE_THREADS
4209  if (f->thread_queue_size)
4210  return get_input_packet_mt(f, pkt);
4211 #endif
4212  return av_read_frame(f->ctx, pkt);
4213 }
4214 
4215 static int got_eagain(void)
4216 {
4217  int i;
4218  for (i = 0; i < nb_output_streams; i++)
4219  if (output_streams[i]->unavailable)
4220  return 1;
4221  return 0;
4222 }
4223 
4224 static void reset_eagain(void)
4225 {
4226  int i;
4227  for (i = 0; i < nb_input_files; i++)
4228  input_files[i]->eagain = 0;
4229  for (i = 0; i < nb_output_streams; i++)
4230  output_streams[i]->unavailable = 0;
4231 }
4232 
4233 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4234 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4235  AVRational time_base)
4236 {
4237  int ret;
4238 
4239  if (!*duration) {
4240  *duration = tmp;
4241  return tmp_time_base;
4242  }
4243 
4244  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4245  if (ret < 0) {
4246  *duration = tmp;
4247  return tmp_time_base;
4248  }
4249 
4250  return time_base;
4251 }
4252 
/* NOTE(review): the signature line (original line 4253) was lost in
 * extraction; this is presumably
 *     static int seek_to_start(InputFile *ifile, AVFormatContext *is)
 * — confirm against the original file.
 *
 * Seeks the input back to its start time (for stream looping), accumulates
 * the duration of the pass just completed into ifile->duration, and
 * decrements the remaining loop count. */
4254 {
4255  InputStream *ist;
4256  AVCodecContext *avctx;
4257  int i, ret, has_audio = 0;
4258  int64_t duration = 0;
4259 
4260  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4261  if (ret < 0)
4262  return ret;
4263 
/* First pass: detect whether any audio stream produced samples; if so the
 * last-frame duration is taken from audio only (see comment below). */
4264  for (i = 0; i < ifile->nb_streams; i++) {
4265  ist = input_streams[ifile->ist_index + i];
4266  avctx = ist->dec_ctx;
4267 
4268  /* duration is the length of the last frame in a stream
4269  * when audio stream is present we don't care about
4270  * last video frame length because it's not defined exactly */
4271  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4272  has_audio = 1;
4273  }
4274 
/* Second pass: compute each stream's total duration (span of seen pts plus
 * an estimate for the last frame) and fold the maximum into ifile->duration. */
4275  for (i = 0; i < ifile->nb_streams; i++) {
4276  ist = input_streams[ifile->ist_index + i];
4277  avctx = ist->dec_ctx;
4278 
4279  if (has_audio) {
4280  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
/* Last audio frame length = nb_samples at the stream's sample rate. */
4281  AVRational sample_rate = {1, avctx->sample_rate};
4282 
4283  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4284  } else {
4285  continue;
4286  }
4287  } else {
/* No audio: estimate the last video frame from the forced or average
 * frame rate, falling back to a single time-base tick. */
4288  if (ist->framerate.num) {
4289  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4290  } else if (ist->st->avg_frame_rate.num) {
4291  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4292  } else {
4293  duration = 1;
4294  }
4295  }
/* Seed the file's time base from the first contributing stream. */
4296  if (!ifile->duration)
4297  ifile->time_base = ist->st->time_base;
4298  /* the total duration of the stream, max_pts - min_pts is
4299  * the duration of the stream without the last frame */
/* The uint64_t cast and INT64_MAX check guard the addition below against
 * signed overflow. */
4300  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4301  duration += ist->max_pts - ist->min_pts;
4302  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4303  ifile->time_base);
4304  }
4305 
/* loop > 0 means a finite loop count; consume one iteration. */
4306  if (ifile->loop > 0)
4307  ifile->loop--;
4308 
4309  return ret;
4310 }
4311 
4312 /*
4313  * Return
4314  * - 0 -- one packet was read and processed
4315  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4316  * this function should be called again
4317  * - AVERROR_EOF -- this function should not be called again
4318  */
4319 static int process_input(int file_index)
4320 {
4321  InputFile *ifile = input_files[file_index];
4323  InputStream *ist;
4324  AVPacket pkt;
4325  int ret, thread_ret, i, j;
4326  int64_t duration;
4327  int64_t pkt_dts;
4328  int disable_discontinuity_correction = copy_ts;
4329 
4330  is = ifile->ctx;
4331  ret = get_input_packet(ifile, &pkt);
4332 
4333  if (ret == AVERROR(EAGAIN)) {
4334  ifile->eagain = 1;
4335  return ret;
4336  }
4337  if (ret < 0 && ifile->loop) {
4338  AVCodecContext *avctx;
4339  for (i = 0; i < ifile->nb_streams; i++) {
4340  ist = input_streams[ifile->ist_index + i];
4341  avctx = ist->dec_ctx;
4342  if (ist->decoding_needed) {
4343  ret = process_input_packet(ist, NULL, 1);
4344  if (ret>0)
4345  return 0;
4346  avcodec_flush_buffers(avctx);
4347  }
4348  }
4349 #if HAVE_THREADS
4350  free_input_thread(file_index);
4351 #endif
4352  ret = seek_to_start(ifile, is);
4353 #if HAVE_THREADS
4354  thread_ret = init_input_thread(file_index);
4355  if (thread_ret < 0)
4356  return thread_ret;
4357 #endif
4358  if (ret < 0)
4359  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4360  else
4361  ret = get_input_packet(ifile, &pkt);
4362  if (ret == AVERROR(EAGAIN)) {
4363  ifile->eagain = 1;
4364  return ret;
4365  }
4366  }
4367  if (ret < 0) {
4368  if (ret != AVERROR_EOF) {
4369  print_error(is->url, ret);
4370  if (exit_on_error)
4371  exit_program(1);
4372  }
4373 
4374  for (i = 0; i < ifile->nb_streams; i++) {
4375  ist = input_streams[ifile->ist_index + i];
4376  if (ist->decoding_needed) {
4377  ret = process_input_packet(ist, NULL, 0);
4378  if (ret>0)
4379  return 0;
4380  }
4381 
4382  /* mark all outputs that don't go through lavfi as finished */
4383  for (j = 0; j < nb_output_streams; j++) {
4384  OutputStream *ost = output_streams[j];
4385 
4386  if (ost->source_index == ifile->ist_index + i &&
4387  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4388  finish_output_stream(ost);
4389  }
4390  }
4391 
4392  ifile->eof_reached = 1;
4393  return AVERROR(EAGAIN);
4394  }
4395 
4396  reset_eagain();
4397 
4398  if (do_pkt_dump) {
4400  is->streams[pkt.stream_index]);
4401  }
4402  /* the following test is needed in case new streams appear
4403  dynamically in stream : we ignore them */
4404  if (pkt.stream_index >= ifile->nb_streams) {
4405  report_new_stream(file_index, &pkt);
4406  goto discard_packet;
4407  }
4408 
4409  ist = input_streams[ifile->ist_index + pkt.stream_index];
4410 
4411  ist->data_size += pkt.size;
4412  ist->nb_packets++;
4413 
4414  if (ist->discard)
4415  goto discard_packet;
4416 
4417  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4419  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4420  if (exit_on_error)
4421  exit_program(1);
4422  }
4423 
4424  if (debug_ts) {
4425  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4426  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4430  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4431  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4432  av_ts2str(input_files[ist->file_index]->ts_offset),
4433  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4434  }
4435 
4436  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4437  int64_t stime, stime2;
4438  // Correcting starttime based on the enabled streams
4439  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4440  // so we instead do it here as part of discontinuity handling
4441  if ( ist->next_dts == AV_NOPTS_VALUE
4442  && ifile->ts_offset == -is->start_time
4443  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4444  int64_t new_start_time = INT64_MAX;
4445  for (i=0; i<is->nb_streams; i++) {
4446  AVStream *st = is->streams[i];
4447  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4448  continue;
4449  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4450  }
4451  if (new_start_time > is->start_time) {
4452  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4453  ifile->ts_offset = -new_start_time;
4454  }
4455  }
4456 
4457  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4458  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4459  ist->wrap_correction_done = 1;
4460 
4461  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4462  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4463  ist->wrap_correction_done = 0;
4464  }
4465  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4466  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4467  ist->wrap_correction_done = 0;
4468  }
4469  }
4470 
4471  /* add the stream-global side data to the first packet */
4472  if (ist->nb_packets == 1) {
4473  for (i = 0; i < ist->st->nb_side_data; i++) {
4474  AVPacketSideData *src_sd = &ist->st->side_data[i];
4475  uint8_t *dst_data;
4476 
4477  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4478  continue;
4479 
4480  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4481  continue;
4482 
4483  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4484  if (!dst_data)
4485  exit_program(1);
4486 
4487  memcpy(dst_data, src_sd->data, src_sd->size);
4488  }
4489  }
4490 
4491  if (pkt.dts != AV_NOPTS_VALUE)
4492  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4493  if (pkt.pts != AV_NOPTS_VALUE)
4494  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4495 
4496  if (pkt.pts != AV_NOPTS_VALUE)
4497  pkt.pts *= ist->ts_scale;
4498  if (pkt.dts != AV_NOPTS_VALUE)
4499  pkt.dts *= ist->ts_scale;
4500 
4502  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4504  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4505  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4506  int64_t delta = pkt_dts - ifile->last_ts;
4507  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4508  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4509  ifile->ts_offset -= delta;
4511  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4512  delta, ifile->ts_offset);
4513  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4514  if (pkt.pts != AV_NOPTS_VALUE)
4515  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4516  }
4517  }
4518 
4519  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4520  if (pkt.pts != AV_NOPTS_VALUE) {
4521  pkt.pts += duration;
4522  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4523  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4524  }
4525 
4526  if (pkt.dts != AV_NOPTS_VALUE)
4527  pkt.dts += duration;
4528 
4530 
4531  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4532  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4533  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4534  ist->st->time_base, AV_TIME_BASE_Q,
4536  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4537  disable_discontinuity_correction = 0;
4538  }
4539 
4540  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4542  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4543  !disable_discontinuity_correction) {
4544  int64_t delta = pkt_dts - ist->next_dts;
4545  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4546  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4547  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4548  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4549  ifile->ts_offset -= delta;
4551  "timestamp discontinuity for stream #%d:%d "
4552  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4553  ist->file_index, ist->st->index, ist->st->id,
4555  delta, ifile->ts_offset);
4556  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4557  if (pkt.pts != AV_NOPTS_VALUE)
4558  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4559  }
4560  } else {
4561  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4562  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4563  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4564  pkt.dts = AV_NOPTS_VALUE;
4565  }
4566  if (pkt.pts != AV_NOPTS_VALUE){
4567  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4568  delta = pkt_pts - ist->next_dts;
4569  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4570  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4571  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4572  pkt.pts = AV_NOPTS_VALUE;
4573  }
4574  }
4575  }
4576  }
4577 
4578  if (pkt.dts != AV_NOPTS_VALUE)
4579  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4580 
4581  if (debug_ts) {