FFmpeg
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
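/* Illustrative use: "-force_key_frames expr:gte(t,n_forced*5)" is evaluated
   against these constants in do_video_out() and forces a keyframe roughly
   every 5 seconds of output. */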
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
127 } BenchmarkTimeStamps;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
146 static uint8_t *subtitle_out;
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
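/* Illustrative trigger for this path: burning a bitmap subtitle stream into
   the video, e.g.
       ffmpeg -i input.mkv -filter_complex "[0:v][0:s]overlay" out.mkv
   which feeds decoded AVSubtitle rectangles through sub2video_update() as
   RGB32 video frames for the overlay filter. */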
176 
177 static int sub2video_get_blank_frame(InputStream *ist)
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
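/* Note: each palettized source byte above is looked up in the rect's CLUT
   (r->data[1]) and written as a 32-bit RGB32 pixel, alpha included, onto the
   blank canvas allocated by sub2video_get_blank_frame(). */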
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
233  AV_BUFFERSRC_FLAG_KEEP_REF |
234  AV_BUFFERSRC_FLAG_PUSH);
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  uint8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
276  sub2video_push_ref(ist, pts);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Error flushing the frame.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
340  term_exit_sigsafe();
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
345 static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
351 sigterm_handler(int sig)
352 {
353  int ret;
354  received_sigterm = sig;
355  received_nb_signals++;
356  term_exit_sigsafe();
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
422  if (!run_as_daemon && stdin_interaction) {
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
510  return received_nb_signals > atomic_load(&transcode_init_done);
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
539  if (ist->sub2video.sub_queue) {
540  while (av_fifo_size(ist->sub2video.sub_queue)) {
541  AVSubtitle sub;
542  av_fifo_generic_read(ist->sub2video.sub_queue,
543  &sub, sizeof(sub), NULL);
545  }
546  av_fifo_freep(&ist->sub2video.sub_queue);
547  }
549  av_freep(&ifilter->name);
550  av_freep(&fg->inputs[j]);
551  }
552  av_freep(&fg->inputs);
553  for (j = 0; j < fg->nb_outputs; j++) {
554  OutputFilter *ofilter = fg->outputs[j];
555 
557  av_freep(&ofilter->name);
558  av_freep(&fg->outputs[j]);
559  }
560  av_freep(&fg->outputs);
561  av_freep(&fg->graph_desc);
562 
564  }
566 
568 
569  /* close files */
570  for (i = 0; i < nb_output_files; i++) {
571  OutputFile *of = output_files[i];
573  if (!of)
574  continue;
575  s = of->ctx;
576  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
577  avio_closep(&s->pb);
579  av_dict_free(&of->opts);
580 
582  }
583  for (i = 0; i < nb_output_streams; i++) {
585 
586  if (!ost)
587  continue;
588 
590 
595 
598  av_freep(&ost->avfilter);
600 
603 
606 
609 
610  if (ost->muxing_queue) {
611  while (av_fifo_size(ost->muxing_queue)) {
612  AVPacket *pkt;
615  }
617  }
618 
620  }
621 #if HAVE_THREADS
622  free_input_threads();
623 #endif
624  for (i = 0; i < nb_input_files; i++) {
628  }
629  for (i = 0; i < nb_input_streams; i++) {
631 
632  av_frame_free(&ist->decoded_frame);
633  av_packet_free(&ist->pkt);
634  av_dict_free(&ist->decoder_opts);
635  avsubtitle_free(&ist->prev_sub.subtitle);
636  av_frame_free(&ist->sub2video.frame);
637  av_freep(&ist->filters);
638  av_freep(&ist->hwaccel_device);
639  av_freep(&ist->dts_buffer);
640 
641  avcodec_free_context(&ist->dec_ctx);
642 
644  }
645 
646  if (vstats_file) {
647  if (fclose(vstats_file))
649  "Error closing vstats file, loss of information possible: %s\n",
650  av_err2str(AVERROR(errno)));
651  }
654 
659 
660  uninit_opts();
661 
663 
664  if (received_sigterm) {
665  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
666  (int) received_sigterm);
667  } else if (ret && atomic_load(&transcode_init_done)) {
668  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
669  }
670  term_exit();
671  ffmpeg_exited = 1;
672 }
673 
675 {
676  const AVDictionaryEntry *t = NULL;
677 
678  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
680  }
681 }
682 
683 void assert_avoptions(AVDictionary *m)
684 {
685  const AVDictionaryEntry *t;
686  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
687  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
688  exit_program(1);
689  }
690 }
691 
692 static void abort_codec_experimental(const AVCodec *c, int encoder)
693 {
694  exit_program(1);
695 }
696 
697 static void update_benchmark(const char *fmt, ...)
698 {
699  if (do_benchmark_all) {
701  va_list va;
702  char buf[1024];
703 
704  if (fmt) {
705  va_start(va, fmt);
706  vsnprintf(buf, sizeof(buf), fmt, va);
707  va_end(va);
709  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
712  t.real_usec - current_time.real_usec, buf);
713  }
714  current_time = t;
715  }
716 }
717 
719 {
720  int i;
721  for (i = 0; i < nb_output_streams; i++) {
722  OutputStream *ost2 = output_streams[i];
723  ost2->finished |= ost == ost2 ? this_stream : others;
724  }
725 }
726 
727 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
728 {
729  AVFormatContext *s = of->ctx;
730  AVStream *st = ost->st;
731  int ret;
732 
733  /*
734  * Audio encoders may split the packets -- #frames in != #packets out.
735  * But there is no reordering, so we can limit the number of output packets
736  * by simply dropping them here.
737  * Counting encoded video frames needs to be done separately because of
738  * reordering, see do_video_out().
739  * Do not count the packet when unqueued because it has been counted when queued.
740  */
741  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
742  if (ost->frame_number >= ost->max_frames) {
744  return;
745  }
746  ost->frame_number++;
747  }
748 
749  if (!of->header_written) {
750  AVPacket *tmp_pkt;
751  /* the muxer is not initialized yet, buffer the packet */
752  if (!av_fifo_space(ost->muxing_queue)) {
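 /* Growth policy (as implemented below): double the FIFO whenever it fills;
    once the buffered data size passes the configured threshold the cap
    becomes max_muxing_queue_size, so the stream errors out instead of
    buffering without bound while the muxer header is still pending. */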
753  size_t cur_size = av_fifo_size(ost->muxing_queue);
754  unsigned int are_we_over_size =
755  (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
756  size_t limit = are_we_over_size ? ost->max_muxing_queue_size : INT_MAX;
757  size_t new_size = FFMIN(2 * cur_size, limit);
758 
759  if (new_size <= cur_size) {
761  "Too many packets buffered for output stream %d:%d.\n",
762  ost->file_index, ost->st->index);
763  exit_program(1);
764  }
765  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
766  if (ret < 0)
767  exit_program(1);
768  }
769  ret = av_packet_make_refcounted(pkt);
770  if (ret < 0)
771  exit_program(1);
772  tmp_pkt = av_packet_alloc();
773  if (!tmp_pkt)
774  exit_program(1);
775  av_packet_move_ref(tmp_pkt, pkt);
776  ost->muxing_queue_data_size += tmp_pkt->size;
777  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
778  return;
779  }
780 
783  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
784 
786  int i;
788  NULL);
789  ost->quality = sd ? AV_RL32(sd) : -1;
790  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
791 
792  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
793  if (sd && i < sd[5])
794  ost->error[i] = AV_RL64(sd + 8 + 8*i);
795  else
796  ost->error[i] = -1;
797  }
798 
799  if (ost->frame_rate.num && ost->is_cfr) {
800  if (pkt->duration > 0)
801  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
803  ost->mux_timebase);
804  }
805  }
806 
808 
809  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
810  if (pkt->dts != AV_NOPTS_VALUE &&
811  pkt->pts != AV_NOPTS_VALUE &&
812  pkt->dts > pkt->pts) {
813  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
814  pkt->dts, pkt->pts,
815  ost->file_index, ost->st->index);
816  pkt->pts =
817  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
818  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
819  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
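 /* The expression above sets pts and dts to the median of
    {pts, dts, last_mux_dts + 1} (sum minus min minus max); e.g.
    pts=10, dts=12, last_mux_dts+1=8 gives 30 - 8 - 12 = 10 for both.
    The non-monotonic DTS clamp below still applies to the result. */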
820  }
822  pkt->dts != AV_NOPTS_VALUE &&
825  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
826  if (pkt->dts < max) {
827  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
828  if (exit_on_error)
829  loglevel = AV_LOG_ERROR;
830  av_log(s, loglevel, "Non-monotonous DTS in output stream "
831  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
833  if (exit_on_error) {
834  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
835  exit_program(1);
836  }
837  av_log(s, loglevel, "changing to %"PRId64". This may result "
838  "in incorrect timestamps in the output file.\n",
839  max);
840  if (pkt->pts >= pkt->dts)
841  pkt->pts = FFMAX(pkt->pts, max);
842  pkt->dts = max;
843  }
844  }
845  }
846  ost->last_mux_dts = pkt->dts;
847 
848  ost->data_size += pkt->size;
849  ost->packets_written++;
850 
852 
853  if (debug_ts) {
854  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
855  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
859  pkt->size
860  );
861  }
862 
864  if (ret < 0) {
865  print_error("av_interleaved_write_frame()", ret);
866  main_return_code = 1;
868  }
869 }
870 
872 {
875 
877  if (of->shortest) {
878  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
879  of->recording_time = FFMIN(of->recording_time, end);
880  }
881 }
882 
883 /*
884  * Send a single packet to the output, applying any bitstream filters
885  * associated with the output stream. This may result in any number
886  * of packets actually being written, depending on what bitstream
887  * filters are applied. The supplied packet is consumed and will be
888  * blank (as if newly-allocated) when this function returns.
889  *
890  * If eof is set, instead indicate EOF to all bitstream filters and
891  * therefore flush any delayed packets to the output. A blank packet
892  * must be supplied in this case.
893  */
894 static void output_packet(OutputFile *of, AVPacket *pkt,
895  OutputStream *ost, int eof)
896 {
897  int ret = 0;
898 
899  /* apply the output bitstream filters */
900  if (ost->bsf_ctx) {
901  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
902  if (ret < 0)
903  goto finish;
904  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
905  write_packet(of, pkt, ost, 0);
906  if (ret == AVERROR(EAGAIN))
907  ret = 0;
908  } else if (!eof)
909  write_packet(of, pkt, ost, 0);
910 
911 finish:
912  if (ret < 0 && ret != AVERROR_EOF) {
913  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
914  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
915  if(exit_on_error)
916  exit_program(1);
917  }
918 }
919 
920 static int check_recording_time(OutputStream *ost)
921 {
923 
924  if (of->recording_time != INT64_MAX &&
926  AV_TIME_BASE_Q) >= 0) {
928  return 0;
929  }
930  return 1;
931 }
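/* e.g. with "-t 10" the output file's recording_time is 10*AV_TIME_BASE;
   once a stream's next output timestamp reaches that point the stream is
   finished and this returns 0 so no further frames are encoded for it. */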
932 
934  AVFrame *frame)
935 {
936  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
937  AVCodecContext *enc = ost->enc_ctx;
938  if (!frame || frame->pts == AV_NOPTS_VALUE ||
939  !enc || !ost->filter || !ost->filter->graph->graph)
940  goto early_exit;
941 
942  {
944 
945  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
947  AVRational tb = enc->time_base;
948  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
949 
950  tb.den <<= extra_bits;
951  float_pts =
952  av_rescale_q(frame->pts, filter_tb, tb) -
954  float_pts /= 1 << extra_bits;
955  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed in case the fps code is changed to work with integers
956  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
957 
958  frame->pts =
959  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
961  }
962 
963 early_exit:
964 
965  if (debug_ts) {
966  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
967  frame ? av_ts2str(frame->pts) : "NULL",
968  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
969  float_pts,
970  enc ? enc->time_base.num : -1,
971  enc ? enc->time_base.den : -1);
972  }
973 
974  return float_pts;
975 }
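/* Worked example (illustrative, assuming no output start offset): with a
   buffersink time base of 1/1000 and an encoder time base of 1/25, a frame
   at pts 200 (0.2 s) becomes frame->pts = 5; float_pts performs the same
   rescale with 16 extra fractional bits so the vsync code can see sub-tick
   drift before rounding. */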
976 
977 static int init_output_stream(OutputStream *ost, AVFrame *frame,
978  char *error, int error_len);
979 
980 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
981  unsigned int fatal)
982 {
983  int ret = AVERROR_BUG;
984  char error[1024] = {0};
985 
986  if (ost->initialized)
987  return 0;
988 
989  ret = init_output_stream(ost, frame, error, sizeof(error));
990  if (ret < 0) {
991  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
993 
994  if (fatal)
995  exit_program(1);
996  }
997 
998  return ret;
999 }
1000 
1001 static void do_audio_out(OutputFile *of, OutputStream *ost,
1002  AVFrame *frame)
1003 {
1004  AVCodecContext *enc = ost->enc_ctx;
1005  AVPacket *pkt = ost->pkt;
1006  int ret;
1007 
1009 
1010  if (!check_recording_time(ost))
1011  return;
1012 
1013  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1014  frame->pts = ost->sync_opts;
1015  ost->sync_opts = frame->pts + frame->nb_samples;
1016  ost->samples_encoded += frame->nb_samples;
1017  ost->frames_encoded++;
1018 
1020  if (debug_ts) {
1021  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1022  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1023  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1024  enc->time_base.num, enc->time_base.den);
1025  }
1026 
1027  ret = avcodec_send_frame(enc, frame);
1028  if (ret < 0)
1029  goto error;
1030 
1031  while (1) {
1032  ret = avcodec_receive_packet(enc, pkt);
1033  if (ret == AVERROR(EAGAIN))
1034  break;
1035  if (ret < 0)
1036  goto error;
1037 
1038  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1039 
1041 
1042  if (debug_ts) {
1043  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1044  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1047  }
1048 
1049  output_packet(of, pkt, ost, 0);
1050  }
1051 
1052  return;
1053 error:
1054  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1055  exit_program(1);
1056 }
1057 
1058 static void do_subtitle_out(OutputFile *of,
1059  OutputStream *ost,
1060  AVSubtitle *sub)
1061 {
1062  int subtitle_out_max_size = 1024 * 1024;
1063  int subtitle_out_size, nb, i;
1064  AVCodecContext *enc;
1065  AVPacket *pkt = ost->pkt;
1066  int64_t pts;
1067 
1068  if (sub->pts == AV_NOPTS_VALUE) {
1069  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1070  if (exit_on_error)
1071  exit_program(1);
1072  return;
1073  }
1074 
1075  enc = ost->enc_ctx;
1076 
1077  if (!subtitle_out) {
1078  subtitle_out = av_malloc(subtitle_out_max_size);
1079  if (!subtitle_out) {
1080  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1081  exit_program(1);
1082  }
1083  }
1084 
1085  /* Note: DVB subtitles need one packet to draw them and another
1086  packet to clear them */
1087  /* XXX: signal it in the codec context ? */
1088  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1089  nb = 2;
1090  else
1091  nb = 1;
1092 
1093  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1094  pts = sub->pts;
1097  for (i = 0; i < nb; i++) {
1098  unsigned save_num_rects = sub->num_rects;
1099 
1101  if (!check_recording_time(ost))
1102  return;
1103 
1104  sub->pts = pts;
1105  // start_display_time is required to be 0
1106  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1107  sub->end_display_time -= sub->start_display_time;
1108  sub->start_display_time = 0;
1109  if (i == 1)
1110  sub->num_rects = 0;
1111 
1112  ost->frames_encoded++;
1113 
1114  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1115  subtitle_out_max_size, sub);
1116  if (i == 1)
1117  sub->num_rects = save_num_rects;
1118  if (subtitle_out_size < 0) {
1119  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1120  exit_program(1);
1121  }
1122 
1124  pkt->data = subtitle_out;
1125  pkt->size = subtitle_out_size;
1127  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1128  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1129  /* XXX: the pts correction is handled here. Maybe handling
1130  it in the codec would be better */
1131  if (i == 0)
1132  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1133  else
1134  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1135  }
1136  pkt->dts = pkt->pts;
1137  output_packet(of, pkt, ost, 0);
1138  }
1139 }
1140 
1141 /* May modify/reset next_picture */
1142 static void do_video_out(OutputFile *of,
1143  OutputStream *ost,
1144  AVFrame *next_picture)
1145 {
1146  int ret;
1147  AVPacket *pkt = ost->pkt;
1148  AVCodecContext *enc = ost->enc_ctx;
1149  AVRational frame_rate;
1150  int nb_frames, nb0_frames, i;
1151  double delta, delta0;
1152  double duration = 0;
1153  double sync_ipts = AV_NOPTS_VALUE;
1154  int frame_size = 0;
1155  InputStream *ist = NULL;
1156  AVFilterContext *filter = ost->filter->filter;
1157 
1158  init_output_stream_wrapper(ost, next_picture, 1);
1159  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1160 
1161  if (ost->source_index >= 0)
1162  ist = input_streams[ost->source_index];
1163 
1164  frame_rate = av_buffersink_get_frame_rate(filter);
1165  if (frame_rate.num > 0 && frame_rate.den > 0)
1166  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1167 
1168  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1169  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1170 
1171  if (!ost->filters_script &&
1172  !ost->filters &&
1173  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1174  next_picture &&
1175  ist &&
1176  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1177  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1178  }
1179 
1180  if (!next_picture) {
1181  //end, flushing
1182  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1183  ost->last_nb0_frames[1],
1184  ost->last_nb0_frames[2]);
1185  } else {
1186  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1187  delta = delta0 + duration;
1188 
1189  /* by default, we output a single frame */
1190  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1191  nb_frames = 1;
1192 
1193  if (delta0 < 0 &&
1194  delta > 0 &&
1195  ost->vsync_method != VSYNC_PASSTHROUGH &&
1196  ost->vsync_method != VSYNC_DROP) {
1197  if (delta0 < -0.6) {
1198  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1199  } else
1200  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1201  sync_ipts = ost->sync_opts;
1202  duration += delta0;
1203  delta0 = 0;
1204  }
1205 
1206  switch (ost->vsync_method) {
1207  case VSYNC_VSCFR:
1208  if (ost->frame_number == 0 && delta0 >= 0.5) {
1209  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1210  delta = duration;
1211  delta0 = 0;
1212  ost->sync_opts = llrint(sync_ipts);
1213  }
1214  case VSYNC_CFR:
1215  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1216  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1217  nb_frames = 0;
1218  } else if (delta < -1.1)
1219  nb_frames = 0;
1220  else if (delta > 1.1) {
1221  nb_frames = lrintf(delta);
1222  if (delta0 > 1.1)
1223  nb0_frames = llrintf(delta0 - 0.6);
1224  }
1225  break;
1226  case VSYNC_VFR:
1227  if (delta <= -0.6)
1228  nb_frames = 0;
1229  else if (delta > 0.6)
1230  ost->sync_opts = llrint(sync_ipts);
1231  break;
1232  case VSYNC_DROP:
1233  case VSYNC_PASSTHROUGH:
1234  ost->sync_opts = llrint(sync_ipts);
1235  break;
1236  default:
1237  av_assert0(0);
1238  }
1239  }
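 /* Net effect of the switch above: for CFR output, delta > 1.1 ticks
    duplicates the frame (nb_frames = lrintf(delta)) and delta < -1.1 drops
    it; VFR drops below -0.6 and re-stamps sync_opts above 0.6; PASSTHROUGH
    and DROP always re-stamp sync_opts from the input timestamp. */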
1240 
1241  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1242  nb0_frames = FFMIN(nb0_frames, nb_frames);
1243 
1244  memmove(ost->last_nb0_frames + 1,
1245  ost->last_nb0_frames,
1246  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1247  ost->last_nb0_frames[0] = nb0_frames;
1248 
1249  if (nb0_frames == 0 && ost->last_dropped) {
1250  nb_frames_drop++;
1252  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1254  }
1255  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1256  if (nb_frames > dts_error_threshold * 30) {
1257  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1258  nb_frames_drop++;
1259  return;
1260  }
1261  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1262  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1263  if (nb_frames_dup > dup_warning) {
1264  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1265  dup_warning *= 10;
1266  }
1267  }
1268  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1269  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1270 
1271  /* duplicates frame if needed */
1272  for (i = 0; i < nb_frames; i++) {
1273  AVFrame *in_picture;
1274  int forced_keyframe = 0;
1275  double pts_time;
1276 
1277  if (i < nb0_frames && ost->last_frame->buf[0]) {
1278  in_picture = ost->last_frame;
1279  } else
1280  in_picture = next_picture;
1281 
1282  if (!in_picture)
1283  return;
1284 
1285  in_picture->pts = ost->sync_opts;
1286 
1287  if (!check_recording_time(ost))
1288  return;
1289 
1290  in_picture->quality = enc->global_quality;
1291  in_picture->pict_type = 0;
1292 
1293  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1294  in_picture->pts != AV_NOPTS_VALUE)
1295  ost->forced_kf_ref_pts = in_picture->pts;
1296 
1297  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1298  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1299  if (ost->forced_kf_index < ost->forced_kf_count &&
1300  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1301  ost->forced_kf_index++;
1302  forced_keyframe = 1;
1303  } else if (ost->forced_keyframes_pexpr) {
1304  double res;
1308  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1314  res);
1315  if (res) {
1316  forced_keyframe = 1;
1322  }
1323 
1325  } else if ( ost->forced_keyframes
1326  && !strncmp(ost->forced_keyframes, "source", 6)
1327  && in_picture->key_frame==1
1328  && !i) {
1329  forced_keyframe = 1;
1330  } else if ( ost->forced_keyframes
1331  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1332  && !i) {
1333  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1334  ost->dropped_keyframe = 0;
1335  }
1336 
1337  if (forced_keyframe) {
1338  in_picture->pict_type = AV_PICTURE_TYPE_I;
1339  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1340  }
1341 
1343  if (debug_ts) {
1344  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1345  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1346  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1347  enc->time_base.num, enc->time_base.den);
1348  }
1349 
1350  ost->frames_encoded++;
1351 
1352  ret = avcodec_send_frame(enc, in_picture);
1353  if (ret < 0)
1354  goto error;
1355  // Make sure Closed Captions will not be duplicated
1356  av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1357 
1358  while (1) {
1359  ret = avcodec_receive_packet(enc, pkt);
1360  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1361  if (ret == AVERROR(EAGAIN))
1362  break;
1363  if (ret < 0)
1364  goto error;
1365 
1366  if (debug_ts) {
1367  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1368  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1371  }
1372 
1374  pkt->pts = ost->sync_opts;
1375 
1377 
1378  if (debug_ts) {
1379  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1380  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1383  }
1384 
1385  frame_size = pkt->size;
1386  output_packet(of, pkt, ost, 0);
1387 
1388  /* if two pass, output log */
1389  if (ost->logfile && enc->stats_out) {
1390  fprintf(ost->logfile, "%s", enc->stats_out);
1391  }
1392  }
1393  ost->sync_opts++;
1394  /*
1395  * For video, number of frames in == number of packets out.
1396  * But there may be reordering, so we can't throw away frames on encoder
1397  * flush, we need to limit them here, before they go into encoder.
1398  */
1399  ost->frame_number++;
1400 
1401  if (vstats_filename && frame_size)
1402  do_video_stats(ost, frame_size);
1403  }
1404 
1405  av_frame_unref(ost->last_frame);
1406  if (next_picture)
1407  av_frame_move_ref(ost->last_frame, next_picture);
1408 
1409  return;
1410 error:
1411  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1412  exit_program(1);
1413 }
1414 
1415 static double psnr(double d)
1416 {
1417  return -10.0 * log10(d);
1418 }
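/* Callers pass the squared error already normalized by width*height*255^2,
   so this returns PSNR in dB; e.g. a normalized error of 1e-4 yields
   -10*log10(1e-4) = 40 dB. */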
1419 
1420 static void do_video_stats(OutputStream *ost, int frame_size)
1421 {
1422  AVCodecContext *enc;
1423  int frame_number;
1424  double ti1, bitrate, avg_bitrate;
1425 
1426  /* this is executed just the first time do_video_stats is called */
1427  if (!vstats_file) {
1428  vstats_file = fopen(vstats_filename, "w");
1429  if (!vstats_file) {
1430  perror("fopen");
1431  exit_program(1);
1432  }
1433  }
1434 
1435  enc = ost->enc_ctx;
1436  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1437  frame_number = ost->st->nb_frames;
1438  if (vstats_version <= 1) {
1439  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1440  ost->quality / (float)FF_QP2LAMBDA);
1441  } else {
1442  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1443  ost->quality / (float)FF_QP2LAMBDA);
1444  }
1445 
1446  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1447  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1448 
1449  fprintf(vstats_file,"f_size= %6d ", frame_size);
1450  /* compute pts value */
1452  if (ti1 < 0.01)
1453  ti1 = 0.01;
1454 
1455  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1456  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1457  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1458  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1459  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1460  }
1461 }
1462 
1464 {
1467 
1469 
1470  if (of->shortest) {
1471  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
1472  of->recording_time = FFMIN(of->recording_time, end);
1473  }
1474 }
1475 
1476 /**
1477  * Get and encode new output from any of the filtergraphs, without causing
1478  * activity.
1479  *
1480  * @return 0 for success, <0 for severe errors
1481  */
1482 static int reap_filters(int flush)
1483 {
1484  AVFrame *filtered_frame = NULL;
1485  int i;
1486 
1487  /* Reap all buffers present in the buffer sinks */
1488  for (i = 0; i < nb_output_streams; i++) {
1489  OutputStream *ost = output_streams[i];
1490  OutputFile *of = output_files[ost->file_index];
1491  AVFilterContext *filter;
1492  AVCodecContext *enc = ost->enc_ctx;
1493  int ret = 0;
1494 
1495  if (!ost->filter || !ost->filter->graph->graph)
1496  continue;
1497  filter = ost->filter->filter;
1498 
1499  /*
1500  * Unlike video, with audio the audio frame size matters.
1501  * Currently we are fully reliant on the lavfi filter chain to
1502  * do the buffering deed for us, and thus the frame size parameter
1503  * needs to be set accordingly. Where does one get the required
1504  * frame size? From the initialized AVCodecContext of an audio
1505  * encoder. Thus, if we have gotten to an audio stream, initialize
1506  * the encoder earlier than receiving the first AVFrame.
1507  */
1508  if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1509  init_output_stream_wrapper(ost, NULL, 1);
1510 
1511  filtered_frame = ost->filtered_frame;
1512 
1513  while (1) {
1514  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1515  AV_BUFFERSINK_FLAG_NO_REQUEST);
1516  if (ret < 0) {
1517  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1519  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1520  } else if (flush && ret == AVERROR_EOF) {
1522  do_video_out(of, ost, NULL);
1523  }
1524  break;
1525  }
1526  if (ost->finished) {
1527  av_frame_unref(filtered_frame);
1528  continue;
1529  }
1530 
1531  switch (av_buffersink_get_type(filter)) {
1532  case AVMEDIA_TYPE_VIDEO:
1533  if (!ost->frame_aspect_ratio.num)
1534  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1535 
1536  do_video_out(of, ost, filtered_frame);
1537  break;
1538  case AVMEDIA_TYPE_AUDIO:
1539  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1540  enc->channels != filtered_frame->channels) {
1542  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1543  break;
1544  }
1545  do_audio_out(of, ost, filtered_frame);
1546  break;
1547  default:
1548  // TODO support subtitle filters
1549  av_assert0(0);
1550  }
1551 
1552  av_frame_unref(filtered_frame);
1553  }
1554  }
1555 
1556  return 0;
1557 }
1558 
1559 static void print_final_stats(int64_t total_size)
1560 {
1561  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1562  uint64_t subtitle_size = 0;
1563  uint64_t data_size = 0;
1564  float percent = -1.0;
1565  int i, j;
1566  int pass1_used = 1;
1567 
1568  for (i = 0; i < nb_output_streams; i++) {
1569  OutputStream *ost = output_streams[i];
1570  switch (ost->enc_ctx->codec_type) {
1571  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1572  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1573  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1574  default: other_size += ost->data_size; break;
1575  }
1576  extra_size += ost->enc_ctx->extradata_size;
1577  data_size += ost->data_size;
1580  pass1_used = 0;
1581  }
1582 
1583  if (data_size && total_size>0 && total_size >= data_size)
1584  percent = 100.0 * (total_size - data_size) / data_size;
1585 
1586  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1587  video_size / 1024.0,
1588  audio_size / 1024.0,
1589  subtitle_size / 1024.0,
1590  other_size / 1024.0,
1591  extra_size / 1024.0);
1592  if (percent >= 0.0)
1593  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1594  else
1595  av_log(NULL, AV_LOG_INFO, "unknown");
1596  av_log(NULL, AV_LOG_INFO, "\n");
1597 
1598  /* print verbose per-stream stats */
1599  for (i = 0; i < nb_input_files; i++) {
1600  InputFile *f = input_files[i];
1601  uint64_t total_packets = 0, total_size = 0;
1602 
1603  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1604  i, f->ctx->url);
1605 
1606  for (j = 0; j < f->nb_streams; j++) {
1607  InputStream *ist = input_streams[f->ist_index + j];
1608  enum AVMediaType type = ist->dec_ctx->codec_type;
1609 
1610  total_size += ist->data_size;
1611  total_packets += ist->nb_packets;
1612 
1613  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1614  i, j, media_type_string(type));
1615  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1616  ist->nb_packets, ist->data_size);
1617 
1618  if (ist->decoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1620  ist->frames_decoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "\n");
1627  }
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1630  total_packets, total_size);
1631  }
1632 
1633  for (i = 0; i < nb_output_files; i++) {
1634  OutputFile *of = output_files[i];
1635  uint64_t total_packets = 0, total_size = 0;
1636 
1637  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1638  i, of->ctx->url);
1639 
1640  for (j = 0; j < of->ctx->nb_streams; j++) {
1641  OutputStream *ost = output_streams[of->ost_index + j];
1642  enum AVMediaType type = ost->enc_ctx->codec_type;
1643 
1644  total_size += ost->data_size;
1645  total_packets += ost->packets_written;
1646 
1647  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1648  i, j, media_type_string(type));
1649  if (ost->encoding_needed) {
1650  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1651  ost->frames_encoded);
1652  if (type == AVMEDIA_TYPE_AUDIO)
1653  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1654  av_log(NULL, AV_LOG_VERBOSE, "; ");
1655  }
1656 
1657  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1659 
1660  av_log(NULL, AV_LOG_VERBOSE, "\n");
1661  }
1662 
1663  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1664  total_packets, total_size);
1665  }
1666  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1667  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1668  if (pass1_used) {
1669  av_log(NULL, AV_LOG_WARNING, "\n");
1670  } else {
1671  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1672  }
1673  }
1674 }
1675 
1676 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1677 {
1678  AVBPrint buf, buf_script;
1679  OutputStream *ost;
1680  AVFormatContext *oc;
1681  int64_t total_size;
1682  AVCodecContext *enc;
1683  int frame_number, vid, i;
1684  double bitrate;
1685  double speed;
1686  int64_t pts = INT64_MIN + 1;
1687  static int64_t last_time = -1;
1688  static int first_report = 1;
1689  static int qp_histogram[52];
1690  int hours, mins, secs, us;
1691  const char *hours_sign;
1692  int ret;
1693  float t;
1694 
1695  if (!print_stats && !is_last_report && !progress_avio)
1696  return;
1697 
1698  if (!is_last_report) {
1699  if (last_time == -1) {
1700  last_time = cur_time;
1701  }
1702  if (((cur_time - last_time) < stats_period && !first_report) ||
1703  (first_report && nb_output_dumped < nb_output_files))
1704  return;
1705  last_time = cur_time;
1706  }
1707 
1708  t = (cur_time-timer_start) / 1000000.0;
1709 
1710 
1711  oc = output_files[0]->ctx;
1712 
1713  total_size = avio_size(oc->pb);
1714  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1715  total_size = avio_tell(oc->pb);
1716 
1717  vid = 0;
1718  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1719  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1720  for (i = 0; i < nb_output_streams; i++) {
1721  float q = -1;
1722  ost = output_streams[i];
1723  enc = ost->enc_ctx;
1724  if (!ost->stream_copy)
1725  q = ost->quality / (float) FF_QP2LAMBDA;
1726 
1727  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1728  av_bprintf(&buf, "q=%2.1f ", q);
1729  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1730  ost->file_index, ost->index, q);
1731  }
1732  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1733  float fps;
1734 
1735  frame_number = ost->frame_number;
1736  fps = t > 1 ? frame_number / t : 0;
1737  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1738  frame_number, fps < 9.95, fps, q);
1739  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1740  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1741  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1742  ost->file_index, ost->index, q);
1743  if (is_last_report)
1744  av_bprintf(&buf, "L");
1745  if (qp_hist) {
1746  int j;
1747  int qp = lrintf(q);
1748  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1749  qp_histogram[qp]++;
1750  for (j = 0; j < 32; j++)
1751  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1752  }
1753 
1754  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1755  int j;
1756  double error, error_sum = 0;
1757  double scale, scale_sum = 0;
1758  double p;
1759  char type[3] = { 'Y','U','V' };
1760  av_bprintf(&buf, "PSNR=");
1761  for (j = 0; j < 3; j++) {
1762  if (is_last_report) {
1763  error = enc->error[j];
1764  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1765  } else {
1766  error = ost->error[j];
1767  scale = enc->width * enc->height * 255.0 * 255.0;
1768  }
1769  if (j)
1770  scale /= 4;
1771  error_sum += error;
1772  scale_sum += scale;
1773  p = psnr(error / scale);
1774  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1775  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1776  ost->file_index, ost->index, type[j] | 32, p);
1777  }
1778  p = psnr(error_sum / scale_sum);
1779  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1780  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1781  ost->file_index, ost->index, p);
1782  }
1783  vid = 1;
1784  }
1785  /* compute min output value */
1789  if (copy_ts) {
1790  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1794  }
1795  }
1796 
1797  if (is_last_report)
1799  }
1800 
1801  secs = FFABS(pts) / AV_TIME_BASE;
1802  us = FFABS(pts) % AV_TIME_BASE;
1803  mins = secs / 60;
1804  secs %= 60;
1805  hours = mins / 60;
1806  mins %= 60;
1807  hours_sign = (pts < 0) ? "-" : "";
1808 
1809  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1810  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1811 
1812  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1813  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1814  if (pts == AV_NOPTS_VALUE) {
1815  av_bprintf(&buf, "N/A ");
1816  } else {
1817  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1818  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1819  }
1820 
1821  if (bitrate < 0) {
1822  av_bprintf(&buf, "bitrate=N/A");
1823  av_bprintf(&buf_script, "bitrate=N/A\n");
1824  }else{
1825  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1826  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1827  }
1828 
1829  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1830  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1831  if (pts == AV_NOPTS_VALUE) {
1832  av_bprintf(&buf_script, "out_time_us=N/A\n");
1833  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1834  av_bprintf(&buf_script, "out_time=N/A\n");
1835  } else {
1836  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1837  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1838  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1839  hours_sign, hours, mins, secs, us);
1840  }
1841 
1842  if (nb_frames_dup || nb_frames_drop)
1843  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1844  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1845  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1846 
1847  if (speed < 0) {
1848  av_bprintf(&buf, " speed=N/A");
1849  av_bprintf(&buf_script, "speed=N/A\n");
1850  } else {
1851  av_bprintf(&buf, " speed=%4.3gx", speed);
1852  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1853  }
1854 
1855  if (print_stats || is_last_report) {
1856  const char end = is_last_report ? '\n' : '\r';
1857  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1858  fprintf(stderr, "%s %c", buf.str, end);
1859  } else
1860  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1861 
1862  fflush(stderr);
1863  }
1864  av_bprint_finalize(&buf, NULL);
1865 
1866  if (progress_avio) {
1867  av_bprintf(&buf_script, "progress=%s\n",
1868  is_last_report ? "end" : "continue");
1869  avio_write(progress_avio, buf_script.str,
1870  FFMIN(buf_script.len, buf_script.size - 1));
1872  av_bprint_finalize(&buf_script, NULL);
1873  if (is_last_report) {
1874  if ((ret = avio_closep(&progress_avio)) < 0)
1876  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1877  }
1878  }
1879 
1880  first_report = 0;
1881 
1882  if (is_last_report)
1883  print_final_stats(total_size);
1884 }
1885 
1887 {
1888  // We never got any input. Set a fake format, which will
1889  // come from libavformat.
1890  ifilter->format = par->format;
1892  ifilter->channels = par->channels;
1894  ifilter->width = par->width;
1895  ifilter->height = par->height;
1897 }
1898 
1899 static void flush_encoders(void)
1900 {
1901  int i, ret;
1902 
1903  for (i = 0; i < nb_output_streams; i++) {
1904  OutputStream *ost = output_streams[i];
1905  AVCodecContext *enc = ost->enc_ctx;
1906  OutputFile *of = output_files[ost->file_index];
1907 
1908  if (!ost->encoding_needed)
1909  continue;
1910 
1911  // Try to enable encoding with no input frames.
1912  // Maybe we should just let encoding fail instead.
1913  if (!ost->initialized) {
1914  FilterGraph *fg = ost->filter->graph;
1915 
1917  "Finishing stream %d:%d without any data written to it.\n",
1918  ost->file_index, ost->st->index);
1919 
1920  if (ost->filter && !fg->graph) {
1921  int x;
1922  for (x = 0; x < fg->nb_inputs; x++) {
1923  InputFilter *ifilter = fg->inputs[x];
1924  if (ifilter->format < 0)
1926  }
1927 
1928  if (!ifilter_has_all_input_formats(fg))
1929  continue;
1930 
1931  ret = configure_filtergraph(fg);
1932  if (ret < 0) {
1933  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1934  exit_program(1);
1935  }
1936 
1938  }
1939 
1941  }
1942 
1943  if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1944  continue;
1945 
1946  for (;;) {
1947  const char *desc = NULL;
1948  AVPacket *pkt = ost->pkt;
1949  int pkt_size;
1950 
1951  switch (enc->codec_type) {
1952  case AVMEDIA_TYPE_AUDIO:
1953  desc = "audio";
1954  break;
1955  case AVMEDIA_TYPE_VIDEO:
1956  desc = "video";
1957  break;
1958  default:
1959  av_assert0(0);
1960  }
1961 
1963 
1964  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1965  ret = avcodec_send_frame(enc, NULL);
1966  if (ret < 0) {
1967  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1968  desc,
1969  av_err2str(ret));
1970  exit_program(1);
1971  }
1972  }
1973 
1974  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1975  if (ret < 0 && ret != AVERROR_EOF) {
1976  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1977  desc,
1978  av_err2str(ret));
1979  exit_program(1);
1980  }
1981  if (ost->logfile && enc->stats_out) {
1982  fprintf(ost->logfile, "%s", enc->stats_out);
1983  }
1984  if (ret == AVERROR_EOF) {
1985  output_packet(of, pkt, ost, 1);
1986  break;
1987  }
1988  if (ost->finished & MUXER_FINISHED) {
1990  continue;
1991  }
1993  pkt_size = pkt->size;
1994  output_packet(of, pkt, ost, 0);
1995  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1996  do_video_stats(ost, pkt_size);
1997  }
1998  }
1999  }
2000 }
2001 
2002 /*
2003  * Check whether a packet from ist should be written into ost at this time
2004  */
2006 {
2007  OutputFile *of = output_files[ost->file_index];
2008  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2009 
2010  if (ost->source_index != ist_index)
2011  return 0;
2012 
2013  if (ost->finished)
2014  return 0;
2015 
2016  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2017  return 0;
2018 
2019  return 1;
2020 }
2021 
2023 {
2024  OutputFile *of = output_files[ost->file_index];
2025  InputFile *f = input_files [ist->file_index];
2026  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2027  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2028  AVPacket *opkt = ost->pkt;
2029 
2030  av_packet_unref(opkt);
2031  // EOF: flush output bitstream filters.
2032  if (!pkt) {
2033  output_packet(of, opkt, ost, 1);
2034  return;
2035  }
2036 
2037  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2038  !ost->copy_initial_nonkeyframes)
2039  return;
2040 
2041  if (!ost->frame_number && !ost->copy_prior_start) {
2042  int64_t comp_start = start_time;
2043  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2044  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2045  if (pkt->pts == AV_NOPTS_VALUE ?
2046  ist->pts < comp_start :
2047  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2048  return;
2049  }
2050 
2051  if (of->recording_time != INT64_MAX &&
2052  ist->pts >= of->recording_time + start_time) {
2054  return;
2055  }
2056 
2057  if (f->recording_time != INT64_MAX) {
2058  start_time = 0;
2059  if (copy_ts) {
2060  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2061  start_time += start_at_zero ? 0 : f->ctx->start_time;
2062  }
2063  if (ist->pts >= f->recording_time + start_time) {
2065  return;
2066  }
2067  }
2068 
2069  if (av_packet_ref(opkt, pkt) < 0)
2070  exit_program(1);
2071 
2072  if (pkt->pts != AV_NOPTS_VALUE)
2073  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2074 
2075  if (pkt->dts == AV_NOPTS_VALUE) {
2076  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2077  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2078  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2079  if(!duration)
2080  duration = ist->dec_ctx->frame_size;
2081  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2082  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2083  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2084  /* dts will be set immediately afterwards to what pts is now */
2085  opkt->pts = opkt->dts - ost_tb_start_time;
2086  } else
2087  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2088  opkt->dts -= ost_tb_start_time;
2089 
2090  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2091 
2092  ost->sync_opts += opkt->duration;
2093 
2094  output_packet(of, opkt, ost, 0);
2095 }
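/* Worked example for the stream-copy timestamp math above (hypothetical
 * values, not taken from a particular stream): with an input time_base of
 * 1/1000, a mux_timebase of 1/90000 and an output start_time of 2 s
 * (ost_tb_start_time = 180000), a packet with pts = 5000 (5.0 s) becomes
 * av_rescale_q(5000, (AVRational){1,1000}, (AVRational){1,90000}) - 180000
 * = 450000 - 180000 = 270000, i.e. 3.0 s into the output file. */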
2096 
2097 static int guess_input_channel_layout(InputStream *ist)
2098 {
2099  AVCodecContext *dec = ist->dec_ctx;
2100 
2101  if (!dec->channel_layout) {
2102  char layout_name[256];
2103 
2104  if (dec->channels > ist->guess_layout_max)
2105  return 0;
2106  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2107  if (!dec->channel_layout)
2108  return 0;
2109  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2110  dec->channels, dec->channel_layout);
2111  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2112  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2113  }
2114  return 1;
2115 }
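/* Example: for a decoded stream reporting 2 channels and no layout,
 * av_get_default_channel_layout(2) yields AV_CH_LAYOUT_STEREO and the
 * warning above prints "stereo"; 6 channels default to 5.1. Streams with
 * more channels than guess_layout_max are left with an unset layout. */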
2116 
2117 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2118 {
2119  if (*got_output || ret<0)
2120  decode_error_stat[ret<0] ++;
2121 
2122  if (ret < 0 && exit_on_error)
2123  exit_program(1);
2124 
2125  if (*got_output && ist) {
2126  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2127  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2128  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2129  if (exit_on_error)
2130  exit_program(1);
2131  }
2132  }
2133 }
2134 
2135 // Filters can be configured only if the formats of all inputs are known.
2136 static int ifilter_has_all_input_formats(FilterGraph *fg)
2137 {
2138  int i;
2139  for (i = 0; i < fg->nb_inputs; i++) {
2140  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2141  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2142  return 0;
2143  }
2144  return 1;
2145 }
2146 
2147 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
2148 {
2149  FilterGraph *fg = ifilter->graph;
2150  AVFrameSideData *sd;
2151  int need_reinit, ret;
2152  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
2153 
2154  if (keep_reference)
2155  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
2156 
2157  /* determine if the parameters for this input changed */
2158  need_reinit = ifilter->format != frame->format;
2159 
2160  switch (ifilter->ist->st->codecpar->codec_type) {
2161  case AVMEDIA_TYPE_AUDIO:
2162  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2163  ifilter->channels != frame->channels ||
2164  ifilter->channel_layout != frame->channel_layout;
2165  break;
2166  case AVMEDIA_TYPE_VIDEO:
2167  need_reinit |= ifilter->width != frame->width ||
2168  ifilter->height != frame->height;
2169  break;
2170  }
2171 
2172  if (!ifilter->ist->reinit_filters && fg->graph)
2173  need_reinit = 0;
2174 
2175  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2176  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2177  need_reinit = 1;
2178 
2179  if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
2180  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2181  need_reinit = 1;
2182  } else if (ifilter->displaymatrix)
2183  need_reinit = 1;
2184 
2185  if (need_reinit) {
2186  ret = ifilter_parameters_from_frame(ifilter, frame);
2187  if (ret < 0)
2188  return ret;
2189  }
2190 
2191  /* (re)init the graph if possible, otherwise buffer the frame and return */
2192  if (need_reinit || !fg->graph) {
2193  if (!ifilter_has_all_input_formats(fg)) {
2194  AVFrame *tmp = av_frame_clone(frame);
2195  if (!tmp)
2196  return AVERROR(ENOMEM);
2197 
2198  if (!av_fifo_space(ifilter->frame_queue)) {
2199  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2200  if (ret < 0) {
2201  av_frame_free(&tmp);
2202  return ret;
2203  }
2204  }
2205  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2206  return 0;
2207  }
2208 
2209  ret = reap_filters(1);
2210  if (ret < 0 && ret != AVERROR_EOF) {
2211  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2212  return ret;
2213  }
2214 
2215  ret = configure_filtergraph(fg);
2216  if (ret < 0) {
2217  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2218  return ret;
2219  }
2220  }
2221 
2222  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2223  if (ret < 0) {
2224  if (ret != AVERROR_EOF)
2225  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2226  return ret;
2227  }
2228 
2229  return 0;
2230 }
2231 
2232 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2233 {
2234  int ret;
2235 
2236  ifilter->eof = 1;
2237 
2238  if (ifilter->filter) {
2239  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2240  if (ret < 0)
2241  return ret;
2242  } else {
2243  // the filtergraph was never configured
2244  if (ifilter->format < 0)
2245  ifilter->format = ifilter->ist->st->codecpar->format;
2246  if (ifilter->format < 0) {
2247  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2248  return AVERROR_INVALIDDATA;
2249  }
2250  }
2251 
2252  return 0;
2253 }
2254 
2255 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2256 // There is the following difference: if you got a frame, you must call
2257 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2258 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2259 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2260 {
2261  int ret;
2262 
2263  *got_frame = 0;
2264 
2265  if (pkt) {
2266  ret = avcodec_send_packet(avctx, pkt);
2267  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2268  // decoded frames with avcodec_receive_frame() until done.
2269  if (ret < 0 && ret != AVERROR_EOF)
2270  return ret;
2271  }
2272 
2273  ret = avcodec_receive_frame(avctx, frame);
2274  if (ret < 0 && ret != AVERROR(EAGAIN))
2275  return ret;
2276  if (ret >= 0)
2277  *got_frame = 1;
2278 
2279  return 0;
2280 }
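/* Typical calling pattern implied by the comment above (an illustrative
 * sketch, not code from this file): feed each packet once, then keep calling
 * with pkt == NULL until no further frame is returned.
 *
 *     int got_frame;
 *     ret = decode(avctx, frame, &got_frame, pkt);        // send one packet
 *     while (got_frame) {
 *         // ... consume frame ...
 *         ret = decode(avctx, frame, &got_frame, NULL);   // receive-only call
 *     }
 */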
2281 
2282 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2283 {
2284  int i, ret;
2285 
2286  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287  for (i = 0; i < ist->nb_filters; i++) {
2288  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2289  if (ret == AVERROR_EOF)
2290  ret = 0; /* ignore */
2291  if (ret < 0) {
2292  av_log(NULL, AV_LOG_ERROR,
2293  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2294  break;
2295  }
2296  }
2297  return ret;
2298 }
2299 
2300 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2301  int *decode_failed)
2302 {
2303  AVFrame *decoded_frame = ist->decoded_frame;
2304  AVCodecContext *avctx = ist->dec_ctx;
2305  int ret, err = 0;
2306  AVRational decoded_frame_tb;
2307 
2308  update_benchmark(NULL);
2309  ret = decode(avctx, decoded_frame, got_output, pkt);
2310  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2311  if (ret < 0)
2312  *decode_failed = 1;
2313 
2314  if (ret >= 0 && avctx->sample_rate <= 0) {
2315  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2316  ret = AVERROR_INVALIDDATA;
2317  }
2318 
2319  if (ret != AVERROR_EOF)
2320  check_decode_result(ist, got_output, ret);
2321 
2322  if (!*got_output || ret < 0)
2323  return ret;
2324 
2325  ist->samples_decoded += decoded_frame->nb_samples;
2326  ist->frames_decoded++;
2327 
2328  /* increment next_dts to use for the case where the input stream does not
2329  have timestamps or there are multiple frames in the packet */
2330  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2331  avctx->sample_rate;
2332  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2333  avctx->sample_rate;
2334 
2335  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2336  decoded_frame_tb = ist->st->time_base;
2337  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2338  decoded_frame->pts = pkt->pts;
2339  decoded_frame_tb = ist->st->time_base;
2340  }else {
2341  decoded_frame->pts = ist->dts;
2342  decoded_frame_tb = AV_TIME_BASE_Q;
2343  }
2344  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2345  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2346  ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
2347  if (pkt)
2348  ist->prev_pkt_pts = pkt->pts;
2350  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2351  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2352  (AVRational){1, avctx->sample_rate});
2353  ist->nb_samples = decoded_frame->nb_samples;
2355 
2357  return err < 0 ? err : ret;
2358 }
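/* Worked example for the next_pts/next_dts increment above (hypothetical
 * values): a frame of 1024 samples at 48000 Hz advances the prediction by
 * AV_TIME_BASE * 1024 / 48000 = 1000000 * 1024 / 48000 = 21333 microseconds
 * (integer division). */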
2359 
2360 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2361  int *decode_failed)
2362 {
2363  AVFrame *decoded_frame = ist->decoded_frame;
2364  int i, ret = 0, err = 0;
2365  int64_t best_effort_timestamp;
2366  int64_t dts = AV_NOPTS_VALUE;
2367 
2368  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2369  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2370  // skip the packet.
2371  if (!eof && pkt && pkt->size == 0)
2372  return 0;
2373 
2374  if (ist->dts != AV_NOPTS_VALUE)
2375  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2376  if (pkt) {
2377  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2378  }
2379 
2380  // The old code used to set dts on the drain packet, which does not work
2381  // with the new API anymore.
2382  if (eof) {
2383  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2384  if (!new)
2385  return AVERROR(ENOMEM);
2386  ist->dts_buffer = new;
2387  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2388  }
2389 
2390  update_benchmark(NULL);
2391  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2392  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2393  if (ret < 0)
2394  *decode_failed = 1;
2395 
2396  // The following line may be required in some cases where there is no parser
2397  // or the parser does not set has_b_frames correctly
2398  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2399  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2400  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2401  } else
2402  av_log(ist->dec_ctx, AV_LOG_WARNING,
2403  "video_delay is larger in decoder than demuxer %d > %d.\n"
2404  "If you want to help, upload a sample "
2405  "of this file to https://streams.videolan.org/upload/ "
2406  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2407  ist->dec_ctx->has_b_frames,
2408  ist->st->codecpar->video_delay);
2409  }
2410 
2411  if (ret != AVERROR_EOF)
2412  check_decode_result(ist, got_output, ret);
2413 
2414  if (*got_output && ret >= 0) {
2415  if (ist->dec_ctx->width != decoded_frame->width ||
2416  ist->dec_ctx->height != decoded_frame->height ||
2417  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2418  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2419  decoded_frame->width,
2420  decoded_frame->height,
2421  decoded_frame->format,
2422  ist->dec_ctx->width,
2423  ist->dec_ctx->height,
2424  ist->dec_ctx->pix_fmt);
2425  }
2426  }
2427 
2428  if (!*got_output || ret < 0)
2429  return ret;
2430 
2431  if(ist->top_field_first>=0)
2432  decoded_frame->top_field_first = ist->top_field_first;
2433 
2434  ist->frames_decoded++;
2435 
2436  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2437  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2438  if (err < 0)
2439  goto fail;
2440  }
2441  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2442 
2443  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2444  *duration_pts = decoded_frame->pkt_duration;
2445 
2446  if (ist->framerate.num)
2447  best_effort_timestamp = ist->cfr_next_pts++;
2448 
2449  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2450  best_effort_timestamp = ist->dts_buffer[0];
2451 
2452  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2453  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2454  ist->nb_dts_buffer--;
2455  }
2456 
2457  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2458  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2459 
2460  if (ts != AV_NOPTS_VALUE)
2461  ist->next_pts = ist->pts = ts;
2462  }
2463 
2464  if (debug_ts) {
2465  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2466  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2467  ist->st->index, av_ts2str(decoded_frame->pts),
2468  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2469  best_effort_timestamp,
2470  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2471  decoded_frame->key_frame, decoded_frame->pict_type,
2472  ist->st->time_base.num, ist->st->time_base.den);
2473  }
2474 
2475  if (ist->st->sample_aspect_ratio.num)
2476  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2477 
2479 
2480 fail:
2482  return err < 0 ? err : ret;
2483 }
2484 
2485 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2486  int *decode_failed)
2487 {
2488  AVSubtitle subtitle;
2489  int free_sub = 1;
2490  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2491  &subtitle, got_output, pkt);
2492 
2493  check_decode_result(NULL, got_output, ret);
2494 
2495  if (ret < 0 || !*got_output) {
2496  *decode_failed = 1;
2497  if (!pkt->size)
2498  sub2video_flush(ist);
2499  return ret;
2500  }
2501 
2502  if (ist->fix_sub_duration) {
2503  int end = 1;
2504  if (ist->prev_sub.got_output) {
2505  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2506  1000, AV_TIME_BASE);
2507  if (end < ist->prev_sub.subtitle.end_display_time) {
2508  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2509  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2510  ist->prev_sub.subtitle.end_display_time, end,
2511  end <= 0 ? ", dropping it" : "");
2512  ist->prev_sub.subtitle.end_display_time = end;
2513  }
2514  }
2515  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2516  FFSWAP(int, ret, ist->prev_sub.ret);
2517  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2518  if (end <= 0)
2519  goto out;
2520  }
2521 
2522  if (!*got_output)
2523  return ret;
2524 
2525  if (ist->sub2video.frame) {
2526  sub2video_update(ist, INT64_MIN, &subtitle);
2527  } else if (ist->nb_filters) {
2528  if (!ist->sub2video.sub_queue)
2529  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2530  if (!ist->sub2video.sub_queue)
2531  exit_program(1);
2532  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2533  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2534  if (ret < 0)
2535  exit_program(1);
2536  }
2537  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2538  free_sub = 0;
2539  }
2540 
2541  if (!subtitle.num_rects)
2542  goto out;
2543 
2544  ist->frames_decoded++;
2545 
2546  for (i = 0; i < nb_output_streams; i++) {
2547  OutputStream *ost = output_streams[i];
2548 
2549  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2550  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2551  continue;
2552 
2553  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2554  }
2555 
2556 out:
2557  if (free_sub)
2558  avsubtitle_free(&subtitle);
2559  return ret;
2560 }
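/* Worked example for the -fix_sub_duration logic above (hypothetical values):
 * if the current subtitle starts 3 s after the previous one, end =
 * av_rescale(3000000, 1000, AV_TIME_BASE) = 3000 ms, so a previous
 * end_display_time of 5000 ms is clipped to 3000 ms; a computed end <= 0
 * drops the previous subtitle entirely. */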
2561 
2562 static int send_filter_eof(InputStream *ist)
2563 {
2564  int i, ret;
2565  /* TODO keep pts also in stream time base to avoid converting back */
2566  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2567  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2568 
2569  for (i = 0; i < ist->nb_filters; i++) {
2570  ret = ifilter_send_eof(ist->filters[i], pts);
2571  if (ret < 0)
2572  return ret;
2573  }
2574  return 0;
2575 }
2576 
2577 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2578 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2579 {
2580  int ret = 0, i;
2581  int repeating = 0;
2582  int eof_reached = 0;
2583 
2584  AVPacket *avpkt = ist->pkt;
2585 
2586  if (!ist->saw_first_ts) {
2587  ist->first_dts =
2588  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2589  ist->pts = 0;
2590  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2591  ist->first_dts =
2592  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2593  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2594  }
2595  ist->saw_first_ts = 1;
2596  }
2597 
2598  if (ist->next_dts == AV_NOPTS_VALUE)
2599  ist->next_dts = ist->dts;
2600  if (ist->next_pts == AV_NOPTS_VALUE)
2601  ist->next_pts = ist->pts;
2602 
2603  if (pkt) {
2604  av_packet_unref(avpkt);
2605  ret = av_packet_ref(avpkt, pkt);
2606  if (ret < 0)
2607  return ret;
2608  }
2609 
2610  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2611  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2612  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2613  ist->next_pts = ist->pts = ist->dts;
2614  }
2615 
2616  // while we have more to decode or while the decoder did output something on EOF
2617  while (ist->decoding_needed) {
2618  int64_t duration_dts = 0;
2619  int64_t duration_pts = 0;
2620  int got_output = 0;
2621  int decode_failed = 0;
2622 
2623  ist->pts = ist->next_pts;
2624  ist->dts = ist->next_dts;
2625 
2626  switch (ist->dec_ctx->codec_type) {
2627  case AVMEDIA_TYPE_AUDIO:
2628  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2629  &decode_failed);
2630  av_packet_unref(avpkt);
2631  break;
2632  case AVMEDIA_TYPE_VIDEO:
2633  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2634  &decode_failed);
2635  if (!repeating || !pkt || got_output) {
2636  if (pkt && pkt->duration) {
2637  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2638  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2639  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2640  duration_dts = ((int64_t)AV_TIME_BASE *
2641  ist->dec_ctx->framerate.den * ticks) /
2642  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2643  }
2644 
2645  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2646  ist->next_dts += duration_dts;
2647  }else
2648  ist->next_dts = AV_NOPTS_VALUE;
2649  }
2650 
2651  if (got_output) {
2652  if (duration_pts > 0) {
2653  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2654  } else {
2655  ist->next_pts += duration_dts;
2656  }
2657  }
2658  av_packet_unref(avpkt);
2659  break;
2660  case AVMEDIA_TYPE_SUBTITLE:
2661  if (repeating)
2662  break;
2663  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2664  if (!pkt && ret >= 0)
2665  ret = AVERROR_EOF;
2666  av_packet_unref(avpkt);
2667  break;
2668  default:
2669  return -1;
2670  }
2671 
2672  if (ret == AVERROR_EOF) {
2673  eof_reached = 1;
2674  break;
2675  }
2676 
2677  if (ret < 0) {
2678  if (decode_failed) {
2679  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2680  ist->file_index, ist->st->index, av_err2str(ret));
2681  } else {
2682  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2683  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2684  }
2685  if (!decode_failed || exit_on_error)
2686  exit_program(1);
2687  break;
2688  }
2689 
2690  if (got_output)
2691  ist->got_output = 1;
2692 
2693  if (!got_output)
2694  break;
2695 
2696  // During draining, we might get multiple output frames in this loop.
2697  // ffmpeg.c does not drain the filter chain on configuration changes,
2698  // which means if we send multiple frames at once to the filters, and
2699  // one of those frames changes configuration, the buffered frames will
2700  // be lost. This can upset certain FATE tests.
2701  // Decode only 1 frame per call on EOF to appease these FATE tests.
2702  // The ideal solution would be to rewrite decoding to use the new
2703  // decoding API in a better way.
2704  if (!pkt)
2705  break;
2706 
2707  repeating = 1;
2708  }
2709 
2710  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2711  /* except when looping we need to flush but not to send an EOF */
2712  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2713  int ret = send_filter_eof(ist);
2714  if (ret < 0) {
2715  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2716  exit_program(1);
2717  }
2718  }
2719 
2720  /* handle stream copy */
2721  if (!ist->decoding_needed && pkt) {
2722  ist->dts = ist->next_dts;
2723  switch (ist->dec_ctx->codec_type) {
2724  case AVMEDIA_TYPE_AUDIO:
2725  av_assert1(pkt->duration >= 0);
2726  if (ist->dec_ctx->sample_rate) {
2727  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2728  ist->dec_ctx->sample_rate;
2729  } else {
2730  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2731  }
2732  break;
2733  case AVMEDIA_TYPE_VIDEO:
2734  if (ist->framerate.num) {
2735  // TODO: Remove work-around for c99-to-c89 issue 7
2736  AVRational time_base_q = AV_TIME_BASE_Q;
2737  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2738  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2739  } else if (pkt->duration) {
2740  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2741  } else if(ist->dec_ctx->framerate.num != 0) {
2742  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2743  ist->next_dts += ((int64_t)AV_TIME_BASE *
2744  ist->dec_ctx->framerate.den * ticks) /
2745  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2746  }
2747  break;
2748  }
2749  ist->pts = ist->dts;
2750  ist->next_pts = ist->next_dts;
2751  }
2752  for (i = 0; i < nb_output_streams; i++) {
2753  OutputStream *ost = output_streams[i];
2754 
2755  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2756  continue;
2757 
2758  do_streamcopy(ist, ost, pkt);
2759  }
2760 
2761  return !eof_reached;
2762 }
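/* Worked example for the stream-copy video case above (hypothetical values):
 * with no packet duration, dec_ctx->framerate = 25/1, ticks_per_frame = 2 and
 * repeat_pict = 1 (so ticks = 2), next_dts advances by
 * AV_TIME_BASE * 1 * 2 / 25 / 2 = 40000 microseconds, i.e. one 25 fps frame. */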
2763 
2764 static int print_sdp(void)
2765 {
2766  char sdp[16384];
2767  int i;
2768  int j, ret;
2769  AVIOContext *sdp_pb;
2770  AVFormatContext **avc;
2771 
2772  for (i = 0; i < nb_output_files; i++) {
2773  if (!output_files[i]->header_written)
2774  return 0;
2775  }
2776 
2777  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2778  if (!avc)
2779  exit_program(1);
2780  for (i = 0, j = 0; i < nb_output_files; i++) {
2781  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2782  avc[j] = output_files[i]->ctx;
2783  j++;
2784  }
2785  }
2786 
2787  if (!j) {
2788  av_log(NULL, AV_LOG_ERROR, "No output streams in the SDP.\n");
2789  ret = AVERROR(EINVAL);
2790  goto fail;
2791  }
2792 
2793  ret = av_sdp_create(avc, j, sdp, sizeof(sdp));
2794  if (ret < 0)
2795  goto fail;
2796 
2797  if (!sdp_filename) {
2798  printf("SDP:\n%s\n", sdp);
2799  fflush(stdout);
2800  } else {
2801  ret = avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL);
2802  if (ret < 0) {
2803  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2804  goto fail;
2805  }
2806 
2807  avio_print(sdp_pb, sdp);
2808  avio_closep(&sdp_pb);
2809  av_freep(&sdp_filename);
2810  }
2811 
2812 fail:
2813  av_freep(&avc);
2814  return ret;
2815 }
2816 
2817 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2818 {
2819  InputStream *ist = s->opaque;
2820  const enum AVPixelFormat *p;
2821  int ret;
2822 
2823  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2824  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2825  const AVCodecHWConfig *config = NULL;
2826  int i;
2827 
2828  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2829  break;
2830 
2831  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2832  ist->hwaccel_id == HWACCEL_AUTO) {
2833  for (i = 0;; i++) {
2834  config = avcodec_get_hw_config(s->codec, i);
2835  if (!config)
2836  break;
2837  if (!(config->methods &
2838  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2839  continue;
2840  if (config->pix_fmt == *p)
2841  break;
2842  }
2843  }
2844  if (config && config->device_type == ist->hwaccel_device_type) {
2845  ret = hwaccel_decode_init(s);
2846  if (ret < 0) {
2847  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2848  av_log(NULL, AV_LOG_FATAL,
2849  "%s hwaccel requested for input stream #%d:%d, "
2850  "but cannot be initialized.\n",
2851  av_hwdevice_get_type_name(config->device_type),
2852  ist->file_index, ist->st->index);
2853  return AV_PIX_FMT_NONE;
2854  }
2855  continue;
2856  }
2857 
2858  ist->hwaccel_pix_fmt = *p;
2859  break;
2860  }
2861  }
2862 
2863  return *p;
2864 }
2865 
2866 static int init_input_stream(int ist_index, char *error, int error_len)
2867 {
2868  int ret;
2869  InputStream *ist = input_streams[ist_index];
2870 
2871  if (ist->decoding_needed) {
2872  const AVCodec *codec = ist->dec;
2873  if (!codec) {
2874  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2875  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2876  return AVERROR(EINVAL);
2877  }
2878 
2879  ist->dec_ctx->opaque = ist;
2880  ist->dec_ctx->get_format = get_format;
2881 #if LIBAVCODEC_VERSION_MAJOR < 60
2882 FF_DISABLE_DEPRECATION_WARNINGS
2883  ist->dec_ctx->thread_safe_callbacks = 1;
2884 FF_ENABLE_DEPRECATION_WARNINGS
2885 #endif
2886 
2887  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2888  (ist->decoding_needed & DECODING_FOR_OST)) {
2889  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2890  if (ist->decoding_needed & DECODING_FOR_FILTER)
2891  av_log(NULL, AV_LOG_WARNING, "Warning: using DVB subtitles for filtering and output at the same time is not fully supported; also see -compute_edt [0|1]\n");
2892  }
2893 
2894  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2895  * audio, and video decoders such as cuvid or mediacodec */
2896  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2897 
2898  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2899  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2900  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2901  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2902  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2903 
2904  ret = hw_device_setup_for_decode(ist);
2905  if (ret < 0) {
2906  snprintf(error, error_len, "Device setup failed for "
2907  "decoder on input stream #%d:%d : %s",
2908  ist->file_index, ist->st->index, av_err2str(ret));
2909  return ret;
2910  }
2911 
2912  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2913  if (ret == AVERROR_EXPERIMENTAL)
2914  abort_codec_experimental(codec, 0);
2915 
2916  snprintf(error, error_len,
2917  "Error while opening decoder for input stream "
2918  "#%d:%d : %s",
2919  ist->file_index, ist->st->index, av_err2str(ret));
2920  return ret;
2921  }
2922  assert_avoptions(ist->decoder_opts);
2923  }
2924 
2925  ist->next_pts = AV_NOPTS_VALUE;
2926  ist->next_dts = AV_NOPTS_VALUE;
2927 
2928  return 0;
2929 }
2930 
2931 static InputStream *get_input_stream(OutputStream *ost)
2932 {
2933  if (ost->source_index >= 0)
2934  return input_streams[ost->source_index];
2935  return NULL;
2936 }
2937 
2938 static int compare_int64(const void *a, const void *b)
2939 {
2940  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2941 }
2942 
2943 /* open the muxer when all the streams are initialized */
2944 static int check_init_output_file(OutputFile *of, int file_index)
2945 {
2946  int ret, i;
2947 
2948  for (i = 0; i < of->ctx->nb_streams; i++) {
2949  OutputStream *ost = output_streams[of->ost_index + i];
2950  if (!ost->initialized)
2951  return 0;
2952  }
2953 
2954  ret = avformat_write_header(of->ctx, &of->opts);
2955  if (ret < 0) {
2956  av_log(NULL, AV_LOG_ERROR,
2957  "Could not write header for output file #%d "
2958  "(incorrect codec parameters ?): %s\n",
2959  file_index, av_err2str(ret));
2960  return ret;
2961  }
2962  //assert_avoptions(of->opts);
2963  of->header_written = 1;
2964 
2965  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2966  nb_output_dumped++;
2967 
2968  if (sdp_filename || want_sdp) {
2969  ret = print_sdp();
2970  if (ret < 0) {
2971  av_log(NULL, AV_LOG_ERROR, "Error writing the SDP.\n");
2972  return ret;
2973  }
2974  }
2975 
2976  /* flush the muxing queues */
2977  for (i = 0; i < of->ctx->nb_streams; i++) {
2978  OutputStream *ost = output_streams[of->ost_index + i];
2979 
2980  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2981  if (!av_fifo_size(ost->muxing_queue))
2982  ost->mux_timebase = ost->st->time_base;
2983 
2984  while (av_fifo_size(ost->muxing_queue)) {
2985  AVPacket *pkt;
2986  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2987  ost->muxing_queue_data_size -= pkt->size;
2988  write_packet(of, pkt, ost, 1);
2989  av_packet_free(&pkt);
2990  }
2991  }
2992 
2993  return 0;
2994 }
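/* Note: the header is deliberately deferred until every stream of this file
 * is initialized; packets produced before that point wait in
 * ost->muxing_queue and are flushed here, right after avformat_write_header()
 * succeeds. */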
2995 
2996 static int init_output_bsfs(OutputStream *ost)
2997 {
2998  AVBSFContext *ctx = ost->bsf_ctx;
2999  int ret;
3000 
3001  if (!ctx)
3002  return 0;
3003 
3004  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3005  if (ret < 0)
3006  return ret;
3007 
3008  ctx->time_base_in = ost->st->time_base;
3009 
3010  ret = av_bsf_init(ctx);
3011  if (ret < 0) {
3012  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3013  ctx->filter->name);
3014  return ret;
3015  }
3016 
3017  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3018  if (ret < 0)
3019  return ret;
3020  ost->st->time_base = ctx->time_base_out;
3021 
3022  return 0;
3023 }
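/* For reference, a minimal standalone sketch of the same lavc bitstream-filter
 * setup pattern (hypothetical filter name and source stream `st`, not code
 * from ffmpeg.c):
 *
 *     const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
 *     AVBSFContext *bsf = NULL;
 *     if (!f || av_bsf_alloc(f, &bsf) < 0)
 *         return AVERROR(ENOMEM);
 *     avcodec_parameters_copy(bsf->par_in, st->codecpar);
 *     bsf->time_base_in = st->time_base;
 *     if (av_bsf_init(bsf) < 0)
 *         return AVERROR(EINVAL);
 *     // packets are then processed with av_bsf_send_packet()/av_bsf_receive_packet()
 */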
3024 
3025 static int init_output_stream_streamcopy(OutputStream *ost)
3026 {
3027  OutputFile *of = output_files[ost->file_index];
3028  InputStream *ist = get_input_stream(ost);
3029  AVCodecParameters *par_dst = ost->st->codecpar;
3030  AVCodecParameters *par_src = ost->ref_par;
3031  AVRational sar;
3032  int i, ret;
3033  uint32_t codec_tag = par_dst->codec_tag;
3034 
3035  av_assert0(ist && !ost->filter);
3036 
3037  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3038  if (ret >= 0)
3039  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3040  if (ret < 0) {
3041  av_log(NULL, AV_LOG_FATAL,
3042  "Error setting up codec context options.\n");
3043  return ret;
3044  }
3045 
3046  ret = avcodec_parameters_from_context(ost->ref_par, ost->enc_ctx);
3047  if (ret < 0) {
3048  av_log(NULL, AV_LOG_FATAL,
3049  "Error getting reference codec parameters.\n");
3050  return ret;
3051  }
3052 
3053  if (!codec_tag) {
3054  unsigned int codec_tag_tmp;
3055  if (!of->ctx->oformat->codec_tag ||
3056  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3057  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3058  codec_tag = par_src->codec_tag;
3059  }
3060 
3061  ret = avcodec_parameters_copy(par_dst, par_src);
3062  if (ret < 0)
3063  return ret;
3064 
3065  par_dst->codec_tag = codec_tag;
3066 
3067  if (!ost->frame_rate.num)
3068  ost->frame_rate = ist->framerate;
3069 
3070  if (ost->frame_rate.num)
3071  ost->st->avg_frame_rate = ost->frame_rate;
3072  else
3073  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3074 
3075  ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3076  if (ret < 0)
3077  return ret;
3078 
3079  // copy timebase while removing common factors
3080  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3081  if (ost->frame_rate.num)
3082  ost->st->time_base = av_inv_q(ost->frame_rate);
3083  else
3084  ost->st->time_base = av_add_q(ist->st->time_base, (AVRational){0, 1});
3085  }
3086 
3087  // copy estimated duration as a hint to the muxer
3088  if (ost->st->duration <= 0 && ist->st->duration > 0)
3089  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3090 
3091  if (ist->st->nb_side_data) {
3092  for (i = 0; i < ist->st->nb_side_data; i++) {
3093  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3094  uint8_t *dst_data;
3095 
3096  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3097  if (!dst_data)
3098  return AVERROR(ENOMEM);
3099  memcpy(dst_data, sd_src->data, sd_src->size);
3100  }
3101  }
3102 
3103  if (ost->rotate_overridden) {
3104  uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3105  sizeof(int32_t) * 9);
3106  if (sd)
3107  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3108  }
3109 
3110  switch (par_dst->codec_type) {
3111  case AVMEDIA_TYPE_AUDIO:
3112  if (audio_volume != 256) {
3113  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3114  exit_program(1);
3115  }
3116  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3117  par_dst->block_align= 0;
3118  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3119  par_dst->block_align= 0;
3120  break;
3121  case AVMEDIA_TYPE_VIDEO:
3122  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3123  sar =
3124  av_mul_q(ost->frame_aspect_ratio,
3125  (AVRational){ par_dst->height, par_dst->width });
3126  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3127  "with stream copy may produce invalid files\n");
3128  }
3129  else if (ist->st->sample_aspect_ratio.num)
3130  sar = ist->st->sample_aspect_ratio;
3131  else
3132  sar = par_src->sample_aspect_ratio;
3133  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3134  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3135  ost->st->r_frame_rate = ist->st->r_frame_rate;
3136  break;
3137  }
3138 
3139  ost->mux_timebase = ist->st->time_base;
3140 
3141  return 0;
3142 }
3143 
3144 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3145 {
3146  const AVDictionaryEntry *e;
3147 
3148  uint8_t *encoder_string;
3149  int encoder_string_len;
3150  int format_flags = 0;
3151  int codec_flags = ost->enc_ctx->flags;
3152 
3153  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3154  return;
3155 
3156  e = av_dict_get(of->opts, "fflags", NULL, 0);
3157  if (e) {
3158  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3159  if (!o)
3160  return;
3161  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3162  }
3163  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3164  if (e) {
3165  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3166  if (!o)
3167  return;
3168  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3169  }
3170 
3171  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3172  encoder_string = av_mallocz(encoder_string_len);
3173  if (!encoder_string)
3174  exit_program(1);
3175 
3176  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3177  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3178  else
3179  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3180  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3181  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3182  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3183 }
3184 
3185 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3186  AVCodecContext *avctx)
3187 {
3188  char *p;
3189  int n = 1, i, size, index = 0;
3190  int64_t t, *pts;
3191 
3192  for (p = kf; *p; p++)
3193  if (*p == ',')
3194  n++;
3195  size = n;
3196  pts = av_malloc_array(size, sizeof(*pts));
3197  if (!pts) {
3198  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3199  exit_program(1);
3200  }
3201 
3202  p = kf;
3203  for (i = 0; i < n; i++) {
3204  char *next = strchr(p, ',');
3205 
3206  if (next)
3207  *next++ = 0;
3208 
3209  if (!memcmp(p, "chapters", 8)) {
3210 
3211  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3212  int j;
3213 
3214  if (avf->nb_chapters > INT_MAX - size ||
3215  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3216  sizeof(*pts)))) {
3217  av_log(NULL, AV_LOG_FATAL,
3218  "Could not allocate forced key frames array.\n");
3219  exit_program(1);
3220  }
3221  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3222  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3223 
3224  for (j = 0; j < avf->nb_chapters; j++) {
3225  AVChapter *c = avf->chapters[j];
3226  av_assert1(index < size);
3227  pts[index++] = av_rescale_q(c->start, c->time_base,
3228  avctx->time_base) + t;
3229  }
3230 
3231  } else {
3232 
3233  t = parse_time_or_die("force_key_frames", p, 1);
3234  av_assert1(index < size);
3235  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3236 
3237  }
3238 
3239  p = next;
3240  }
3241 
3242  av_assert0(index == size);
3243  qsort(pts, size, sizeof(*pts), compare_int64);
3244  ost->forced_kf_count = size;
3245  ost->forced_kf_pts = pts;
3246 }
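/* Examples of the -force_key_frames syntax parsed above (values illustrative):
 *   -force_key_frames 0,5,10          keyframes at 0, 5 and 10 seconds
 *   -force_key_frames 0:00:30         a single keyframe at 30 seconds
 *   -force_key_frames chapters-0.1    a keyframe 0.1 s before every chapter
 * Each time is rescaled from AV_TIME_BASE_Q into the encoder time base and the
 * sorted result is stored in ost->forced_kf_pts. */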
3247 
3248 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3249 {
3250  InputStream *ist = get_input_stream(ost);
3251  AVCodecContext *enc_ctx = ost->enc_ctx;
3252  AVFormatContext *oc;
3253 
3254  if (ost->enc_timebase.num > 0) {
3255  enc_ctx->time_base = ost->enc_timebase;
3256  return;
3257  }
3258 
3259  if (ost->enc_timebase.num < 0) {
3260  if (ist) {
3261  enc_ctx->time_base = ist->st->time_base;
3262  return;
3263  }
3264 
3265  oc = output_files[ost->file_index]->ctx;
3266  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3267  }
3268 
3269  enc_ctx->time_base = default_time_base;
3270 }
3271 
3272 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3273 {
3274  InputStream *ist = get_input_stream(ost);
3275  AVCodecContext *enc_ctx = ost->enc_ctx;
3276  AVCodecContext *dec_ctx = NULL;
3277  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3278  int ret;
3279 
3280  set_encoder_id(output_files[ost->file_index], ost);
3281 
3282  if (ist) {
3283  dec_ctx = ist->dec_ctx;
3284  }
3285 
3286  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3287  if (!ost->frame_rate.num)
3289  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3290  ost->frame_rate = (AVRational){25, 1};
3292  "No information "
3293  "about the input framerate is available. Falling "
3294  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3295  "if you want a different framerate.\n",
3296  ost->file_index, ost->index);
3297  }
3298 
3299  if (ost->max_frame_rate.num &&
3301  !ost->frame_rate.den))
3303 
3304  if (ost->enc->supported_framerates && !ost->force_fps) {
3305  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3306  ost->frame_rate = ost->enc->supported_framerates[idx];
3307  }
3308  // reduce frame rate for mpeg4 to be within the spec limits
3309  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3311  ost->frame_rate.num, ost->frame_rate.den, 65535);
3312  }
3313  }
3314 
3315  switch (enc_ctx->codec_type) {
3316  case AVMEDIA_TYPE_AUDIO:
3321 
3322  if (ost->bits_per_raw_sample)
3324  else if (dec_ctx && ost->filter->graph->is_meta)
3326  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3327 
3329  break;
3330 
3331  case AVMEDIA_TYPE_VIDEO:
3333 
3334  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3336  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3338  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3339  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3340  }
3341 
3342  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3343  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3345  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3346  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3348 
3350 
3351  if (ost->bits_per_raw_sample)
3353  else if (dec_ctx && ost->filter->graph->is_meta)
3355  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3356 
3357  if (frame) {
3358  enc_ctx->color_range = frame->color_range;
3359  enc_ctx->color_primaries = frame->color_primaries;
3360  enc_ctx->color_trc = frame->color_trc;
3361  enc_ctx->colorspace = frame->colorspace;
3362  enc_ctx->chroma_sample_location = frame->chroma_location;
3363  }
3364 
3365  enc_ctx->framerate = ost->frame_rate;
3366 
3368 
3369  // Field order: autodetection
3370  if (frame) {
3372  ost->top_field_first >= 0)
3373  frame->top_field_first = !!ost->top_field_first;
3374 
3375  if (frame->interlaced_frame) {
3376  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3377  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3378  else
3379  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3380  } else
3381  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3382  }
3383 
3384  // Field order: override
3385  if (ost->top_field_first == 0) {
3386  enc_ctx->field_order = AV_FIELD_BB;
3387  } else if (ost->top_field_first == 1) {
3388  enc_ctx->field_order = AV_FIELD_TT;
3389  }
3390 
3391  if (ost->forced_keyframes) {
3392  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3395  if (ret < 0) {
3397  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3398  return ret;
3399  }
3404 
3405  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3406  // parse it only for static kf timings
3407  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3409  }
3410  }
3411  break;
3412  case AVMEDIA_TYPE_SUBTITLE:
3413  enc_ctx->time_base = AV_TIME_BASE_Q;
3414  if (!enc_ctx->width) {
3417  }
3418  break;
3419  case AVMEDIA_TYPE_DATA:
3420  break;
3421  default:
3422  abort();
3423  break;
3424  }
3425 
3426  ost->mux_timebase = enc_ctx->time_base;
3427 
3428  return 0;
3429 }
3430 
3431 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3432  char *error, int error_len)
3433 {
3434  int ret = 0;
3435 
3436  if (ost->encoding_needed) {
3437  const AVCodec *codec = ost->enc;
3438  AVCodecContext *dec = NULL;
3439  InputStream *ist;
3440 
3441  ret = init_output_stream_encode(ost, frame);
3442  if (ret < 0)
3443  return ret;
3444 
3445  if ((ist = get_input_stream(ost)))
3446  dec = ist->dec_ctx;
3447  if (dec && dec->subtitle_header) {
3448  /* ASS code assumes this buffer is null terminated so add extra byte. */
3449  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3450  if (!ost->enc_ctx->subtitle_header)
3451  return AVERROR(ENOMEM);
3452  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3453  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3454  }
3455  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3456  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3457 
3458  ret = hw_device_setup_for_encode(ost);
3459  if (ret < 0) {
3460  snprintf(error, error_len, "Device setup failed for "
3461  "encoder on output stream #%d:%d : %s",
3462  ost->file_index, ost->index, av_err2str(ret));
3463  return ret;
3464  }
3465 
3466  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3467  int input_props = 0, output_props = 0;
3468  AVCodecDescriptor const *input_descriptor =
3469  avcodec_descriptor_get(dec->codec_id);
3470  AVCodecDescriptor const *output_descriptor =
3471  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3472  if (input_descriptor)
3473  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3474  if (output_descriptor)
3475  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3476  if (input_props && output_props && input_props != output_props) {
3477  snprintf(error, error_len,
3478  "Subtitle encoding currently only possible from text to text "
3479  "or bitmap to bitmap");
3480  return AVERROR_INVALIDDATA;
3481  }
3482  }
3483 
3484  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3485  if (ret == AVERROR_EXPERIMENTAL)
3486  abort_codec_experimental(codec, 1);
3487  snprintf(error, error_len,
3488  "Error while opening encoder for output stream #%d:%d - "
3489  "maybe incorrect parameters such as bit_rate, rate, width or height",
3490  ost->file_index, ost->index);
3491  return ret;
3492  }
3493  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3494  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3495  av_buffersink_set_frame_size(ost->filter->filter,
3496  ost->enc_ctx->frame_size);
3498  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3499  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3500  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3501  " It takes bits/s as argument, not kbits/s\n");
3502 
3503  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3504  if (ret < 0) {
3505  av_log(NULL, AV_LOG_FATAL,
3506  "Error initializing the output stream codec context.\n");
3507  exit_program(1);
3508  }
3509 
3510  if (ost->enc_ctx->nb_coded_side_data) {
3511  int i;
3512 
3513  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3514  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3515  uint8_t *dst_data;
3516 
3517  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3518  if (!dst_data)
3519  return AVERROR(ENOMEM);
3520  memcpy(dst_data, sd_src->data, sd_src->size);
3521  }
3522  }
3523 
3524  /*
3525  * Add global input side data. For now this is naive, and copies it
3526  * from the input stream's global side data. All side data should
3527  * really be funneled over AVFrame and libavfilter, then added back to
3528  * packet side data, and then potentially using the first packet for
3529  * global side data.
3530  */
3531  if (ist) {
3532  int i;
3533  for (i = 0; i < ist->st->nb_side_data; i++) {
3534  AVPacketSideData *sd = &ist->st->side_data[i];
3535  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3536  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3537  if (!dst)
3538  return AVERROR(ENOMEM);
3539  memcpy(dst, sd->data, sd->size);
3540  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3541  av_display_rotation_set((uint32_t *)dst, 0);
3542  }
3543  }
3544  }
3545 
3546  // copy timebase while removing common factors
3547  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3548  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3549 
3550  // copy estimated duration as a hint to the muxer
3551  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3552  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3553  } else if (ost->stream_copy) {
3554  ret = init_output_stream_streamcopy(ost);
3555  if (ret < 0)
3556  return ret;
3557  }
3558 
3559  /* initialize bitstream filters for the output stream
3560  * needs to be done here, because the codec id for streamcopy is not
3561  * known until now */
3562  ret = init_output_bsfs(ost);
3563  if (ret < 0)
3564  return ret;
3565 
3566  ost->initialized = 1;
3567 
3568  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3569  if (ret < 0)
3570  return ret;
3571 
3572  return ret;
3573 }
3574 
3575 static void report_new_stream(int input_index, AVPacket *pkt)
3576 {
3577  InputFile *file = input_files[input_index];
3578  AVStream *st = file->ctx->streams[pkt->stream_index];
3579 
3580  if (pkt->stream_index < file->nb_streams_warn)
3581  return;
3582  av_log(file->ctx, AV_LOG_WARNING,
3583  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3584  av_get_media_type_string(st->codecpar->codec_type),
3585  input_index, pkt->stream_index,
3586  avio_tell(file->ctx->pb), av_ts2str(pkt->dts));
3587  file->nb_streams_warn = pkt->stream_index + 1;
3588 }
3589 
3590 static int transcode_init(void)
3591 {
3592  int ret = 0, i, j, k;
3593  AVFormatContext *oc;
3594  OutputStream *ost;
3595  InputStream *ist;
3596  char error[1024] = {0};
3597 
3598  for (i = 0; i < nb_filtergraphs; i++) {
3599  FilterGraph *fg = filtergraphs[i];
3600  for (j = 0; j < fg->nb_outputs; j++) {
3601  OutputFilter *ofilter = fg->outputs[j];
3602  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3603  continue;
3604  if (fg->nb_inputs != 1)
3605  continue;
3606  for (k = nb_input_streams-1; k >= 0 ; k--)
3607  if (fg->inputs[0]->ist == input_streams[k])
3608  break;
3609  ofilter->ost->source_index = k;
3610  }
3611  }
3612 
3613  /* init framerate emulation */
3614  for (i = 0; i < nb_input_files; i++) {
3615  InputFile *ifile = input_files[i];
3616  if (ifile->readrate || ifile->rate_emu)
3617  for (j = 0; j < ifile->nb_streams; j++)
3618  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3619  }
3620 
3621  /* init input streams */
3622  for (i = 0; i < nb_input_streams; i++)
3623  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3624  for (i = 0; i < nb_output_streams; i++) {
3625  ost = output_streams[i];
3627  }
3628  goto dump_format;
3629  }
3630 
3631  /*
3632  * initialize stream copy and subtitle/data streams.
3633  * Encoded AVFrame based streams will get initialized as follows:
3634  * - when the first AVFrame is received in do_video_out
3635  * - just before the first AVFrame is received in either transcode_step
3636  * or reap_filters due to us requiring the filter chain buffer sink
3637  * to be configured with the correct audio frame size, which is only
3638  * known after the encoder is initialized.
3639  */
3640  for (i = 0; i < nb_output_streams; i++) {
3641  if (!output_streams[i]->stream_copy &&
3642  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3643  output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3644  continue;
3645 
3646  ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3647  if (ret < 0)
3648  goto dump_format;
3649  }
3650 
3651  /* discard unused programs */
3652  for (i = 0; i < nb_input_files; i++) {
3653  InputFile *ifile = input_files[i];
3654  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3655  AVProgram *p = ifile->ctx->programs[j];
3656  int discard = AVDISCARD_ALL;
3657 
3658  for (k = 0; k < p->nb_stream_indexes; k++)
3659  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3660  discard = AVDISCARD_DEFAULT;
3661  break;
3662  }
3663  p->discard = discard;
3664  }
3665  }
3666 
3667  /* write headers for files with no streams */
3668  for (i = 0; i < nb_output_files; i++) {
3669  oc = output_files[i]->ctx;
3670  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3671  ret = check_init_output_file(output_files[i], i);
3672  if (ret < 0)
3673  goto dump_format;
3674  }
3675  }
3676 
3677  dump_format:
3678  /* dump the stream mapping */
3679  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3680  for (i = 0; i < nb_input_streams; i++) {
3681  ist = input_streams[i];
3682 
3683  for (j = 0; j < ist->nb_filters; j++) {
3684  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3685  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3686  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3687  ist->filters[j]->name);
3688  if (nb_filtergraphs > 1)
3689  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3690  av_log(NULL, AV_LOG_INFO, "\n");
3691  }
3692  }
3693  }
3694 
3695  for (i = 0; i < nb_output_streams; i++) {
3696  ost = output_streams[i];
3697 
3698  if (ost->attachment_filename) {
3699  /* an attached file */
3700  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3701  ost->attachment_filename, ost->file_index, ost->index);
3702  continue;
3703  }
3704 
3706  /* output from a complex graph */
3707  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3708  if (nb_filtergraphs > 1)
3709  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3710 
3711  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3712  ost->index, ost->enc ? ost->enc->name : "?");
3713  continue;
3714  }
3715 
3716  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3717  input_streams[ost->source_index]->file_index,
3718  input_streams[ost->source_index]->st->index,
3719  ost->file_index,
3720  ost->index);
3721  if (ost->sync_ist != input_streams[ost->source_index])
3722  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3723  ost->sync_ist->file_index,
3724  ost->sync_ist->st->index);
3725  if (ost->stream_copy)
3726  av_log(NULL, AV_LOG_INFO, " (copy)");
3727  else {
3728  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3729  const AVCodec *out_codec = ost->enc;
3730  const char *decoder_name = "?";
3731  const char *in_codec_name = "?";
3732  const char *encoder_name = "?";
3733  const char *out_codec_name = "?";
3734  const AVCodecDescriptor *desc;
3735 
3736  if (in_codec) {
3737  decoder_name = in_codec->name;
3738  desc = avcodec_descriptor_get(in_codec->id);
3739  if (desc)
3740  in_codec_name = desc->name;
3741  if (!strcmp(decoder_name, in_codec_name))
3742  decoder_name = "native";
3743  }
3744 
3745  if (out_codec) {
3746  encoder_name = out_codec->name;
3747  desc = avcodec_descriptor_get(out_codec->id);
3748  if (desc)
3749  out_codec_name = desc->name;
3750  if (!strcmp(encoder_name, out_codec_name))
3751  encoder_name = "native";
3752  }
3753 
3754  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3755  in_codec_name, decoder_name,
3756  out_codec_name, encoder_name);
3757  }
3758  av_log(NULL, AV_LOG_INFO, "\n");
3759  }
3760 
3761  if (ret) {
3762  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3763  return ret;
3764  }
3765 
3766  atomic_store(&transcode_init_done, 1);
3767 
3768  return 0;
3769 }
3770 
3771 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3772 static int need_output(void)
3773 {
3774  int i;
3775 
3776  for (i = 0; i < nb_output_streams; i++) {
3777  OutputStream *ost = output_streams[i];
3778  OutputFile *of = output_files[ost->file_index];
3779  AVFormatContext *os = output_files[ost->file_index]->ctx;
3780 
3781  if (ost->finished ||
3782  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3783  continue;
3784  if (ost->frame_number >= ost->max_frames) {
3785  int j;
3786  for (j = 0; j < of->ctx->nb_streams; j++)
3787  close_output_stream(output_streams[of->ost_index + j]);
3788  continue;
3789  }
3790 
3791  return 1;
3792  }
3793 
3794  return 0;
3795 }
3796 
3797 /**
3798  * Select the output stream to process.
3799  *
3800  * @return selected output stream, or NULL if none available
3801  */
3802 static OutputStream *choose_output(void)
3803 {
3804  int i;
3805  int64_t opts_min = INT64_MAX;
3806  OutputStream *ost_min = NULL;
3807 
3808  for (i = 0; i < nb_output_streams; i++) {
3809  OutputStream *ost = output_streams[i];
3810  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3811  av_rescale_q(ost->last_mux_dts, ost->st->time_base,
3812  AV_TIME_BASE_Q);
3813  if (ost->last_mux_dts == AV_NOPTS_VALUE)
3814  av_log(NULL, AV_LOG_DEBUG,
3815  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3816  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3817 
3818  if (!ost->initialized && !ost->inputs_done)
3819  return ost->unavailable ? NULL : ost;
3820 
3821  if (!ost->finished && opts < opts_min) {
3822  opts_min = opts;
3823  ost_min = ost->unavailable ? NULL : ost;
3824  }
3825  }
3826  return ost_min;
3827 }
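/* Example of the selection rule above (hypothetical values): with stream A at
 * last_mux_dts = 1.20 s and stream B at 1.35 s (both rescaled to
 * AV_TIME_BASE_Q), A has the smaller value and is processed next, which keeps
 * the output roughly interleaved in dts order. */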
3828 
3829 static void set_tty_echo(int on)
3830 {
3831 #if HAVE_TERMIOS_H
3832  struct termios tty;
3833  if (tcgetattr(0, &tty) == 0) {
3834  if (on) tty.c_lflag |= ECHO;
3835  else tty.c_lflag &= ~ECHO;
3836  tcsetattr(0, TCSANOW, &tty);
3837  }
3838 #endif
3839 }
3840 
3841 static int check_keyboard_interaction(int64_t cur_time)
3842 {
3843  int i, ret, key;
3844  static int64_t last_time;
3845  if (received_nb_signals)
3846  return AVERROR_EXIT;
3847  /* read_key() returns 0 on EOF */
3848  if(cur_time - last_time >= 100000 && !run_as_daemon){
3849  key = read_key();
3850  last_time = cur_time;
3851  }else
3852  key = -1;
3853  if (key == 'q') {
3854  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
3855  return AVERROR_EXIT;
3856  }
3857  if (key == '+') av_log_set_level(av_log_get_level()+10);
3858  if (key == '-') av_log_set_level(av_log_get_level()-10);
3859  if (key == 's') qp_hist ^= 1;
3860  if (key == 'h'){
3861  if (do_hex_dump){
3862  do_hex_dump = do_pkt_dump = 0;
3863  } else if(do_pkt_dump){
3864  do_hex_dump = 1;
3865  } else
3866  do_pkt_dump = 1;
3867  av_log_set_level(AV_LOG_DEBUG);
3868  }
3869  if (key == 'c' || key == 'C'){
3870  char buf[4096], target[64], command[256], arg[256] = {0};
3871  double time;
3872  int k, n = 0;
3873  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3874  i = 0;
3875  set_tty_echo(1);
3876  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3877  if (k > 0)
3878  buf[i++] = k;
3879  buf[i] = 0;
3880  set_tty_echo(0);
3881  fprintf(stderr, "\n");
3882  if (k > 0 &&
3883  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3884  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3885  target, time, command, arg);
3886  for (i = 0; i < nb_filtergraphs; i++) {
3887  FilterGraph *fg = filtergraphs[i];
3888  if (fg->graph) {
3889  if (time < 0) {
3890  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3891  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3892  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3893  } else if (key == 'c') {
3894  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3895  ret = AVERROR_PATCHWELCOME;
3896  } else {
3897  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3898  if (ret < 0)
3899  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3900  }
3901  }
3902  }
3903  } else {
3904  av_log(NULL, AV_LOG_ERROR,
3905  "Parse error, at least 3 arguments were expected, "
3906  "only %d given in string '%s'\n", n, buf);
3907  }
3908  }
3909  if (key == 'd' || key == 'D'){
3910  int debug=0;
3911  if(key == 'D') {
3912  debug = input_streams[0]->dec_ctx->debug << 1;
3913  if(!debug) debug = 1;
3914  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
3915  debug += debug;
3916  }else{
3917  char buf[32];
3918  int k = 0;
3919  i = 0;
3920  set_tty_echo(1);
3921  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3922  if (k > 0)
3923  buf[i++] = k;
3924  buf[i] = 0;
3925  set_tty_echo(0);
3926  fprintf(stderr, "\n");
3927  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3928  fprintf(stderr,"error parsing debug value\n");
3929  }
3930  for(i=0;i<nb_input_streams;i++) {
3931  input_streams[i]->dec_ctx->debug = debug;
3932  }
3933  for(i=0;i<nb_output_streams;i++) {
3934  OutputStream *ost = output_streams[i];
3935  ost->enc_ctx->debug = debug;
3936  }
3937  if(debug) av_log_set_level(AV_LOG_DEBUG);
3938  fprintf(stderr,"debug=%d\n", debug);
3939  }
3940  if (key == '?'){
3941  fprintf(stderr, "key function\n"
3942  "? show this help\n"
3943  "+ increase verbosity\n"
3944  "- decrease verbosity\n"
3945  "c Send command to first matching filter supporting it\n"
3946  "C Send/Queue command to all matching filters\n"
3947  "D cycle through available debug modes\n"
3948  "h dump packets/hex press to cycle through the 3 states\n"
3949  "q quit\n"
3950  "s Show QP histogram\n"
3951  );
3952  }
3953  return 0;
3954 }
3955 
3956 #if HAVE_THREADS
3957 static void *input_thread(void *arg)
3958 {
3959  InputFile *f = arg;
3960  AVPacket *pkt = f->pkt, *queue_pkt;
3961  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3962  int ret = 0;
3963 
3964  while (1) {
3965  ret = av_read_frame(f->ctx, pkt);
3966 
3967  if (ret == AVERROR(EAGAIN)) {
3968  av_usleep(10000);
3969  continue;
3970  }
3971  if (ret < 0) {
3972  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3973  break;
3974  }
3975  queue_pkt = av_packet_alloc();
3976  if (!queue_pkt) {
3977  av_packet_unref(pkt);
3978  av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
3979  break;
3980  }
3981  av_packet_move_ref(queue_pkt, pkt);
3982  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3983  if (flags && ret == AVERROR(EAGAIN)) {
3984  flags = 0;
3985  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3986  av_log(f->ctx, AV_LOG_WARNING,
3987  "Thread message queue blocking; consider raising the "
3988  "thread_queue_size option (current value: %d)\n",
3989  f->thread_queue_size);
3990  }
3991  if (ret < 0) {
3992  if (ret != AVERROR_EOF)
3993  av_log(f->ctx, AV_LOG_ERROR,
3994  "Unable to send packet to main thread: %s\n",
3995  av_err2str(ret));
3996  av_packet_free(&queue_pkt);
3997  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3998  break;
3999  }
4000  }
4001 
4002  return NULL;
4003 }
4004 
4005 static void free_input_thread(int i)
4006 {
4007  InputFile *f = input_files[i];
4008  AVPacket *pkt;
4009 
4010  if (!f || !f->in_thread_queue)
4011  return;
4012  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4013  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4014  av_packet_free(&pkt);
4015 
4016  pthread_join(f->thread, NULL);
4017  f->joined = 1;
4018  av_thread_message_queue_free(&f->in_thread_queue);
4019 }
4020 
4021 static void free_input_threads(void)
4022 {
4023  int i;
4024 
4025  for (i = 0; i < nb_input_files; i++)
4026  free_input_thread(i);
4027 }
4028 
4029 static int init_input_thread(int i)
4030 {
4031  int ret;
4032  InputFile *f = input_files[i];
4033 
4034  if (f->thread_queue_size < 0)
4035  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4036  if (!f->thread_queue_size)
4037  return 0;
4038 
4039  if (f->ctx->pb ? !f->ctx->pb->seekable :
4040  strcmp(f->ctx->iformat->name, "lavfi"))
4041  f->non_blocking = 1;
4042  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4043  f->thread_queue_size, sizeof(f->pkt));
4044  if (ret < 0)
4045  return ret;
4046 
4047  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4048  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4049  av_thread_message_queue_free(&f->in_thread_queue);
4050  return AVERROR(ret);
4051  }
4052 
4053  return 0;
4054 }
4055 
4056 static int init_input_threads(void)
4057 {
4058  int i, ret;
4059 
4060  for (i = 0; i < nb_input_files; i++) {
4061  ret = init_input_thread(i);
4062  if (ret < 0)
4063  return ret;
4064  }
4065  return 0;
4066 }
4067 
4068 static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4069 {
4070  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4071  f->non_blocking ?
4072  AV_THREAD_MESSAGE_NONBLOCK : 0);
4073 }
4074 #endif
4075 
4076 static int get_input_packet(InputFile *f, AVPacket **pkt)
4077 {
4078  if (f->readrate || f->rate_emu) {
4079  int i;
4080  int64_t file_start = copy_ts * (
4081  (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
4082  (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
4083  );
4084  float