FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identification used by cmdutils (banner, version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination for -vstats output; opened lazily, closed in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* Variable names usable in the -force_key_frames expression evaluator. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Forward declarations for helpers defined later in this file. */
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
/* Global transcode state / statistics. */
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int want_sdp = 1;
133 
/* Timestamp of the last update_benchmark() call (same units as getutime()). */
134 static int current_time;
136 
138 
143 
148 
151 
/* NOTE(review): doxygen lines 135-151 were lost in extraction; additional
   declarations (e.g. the subtitle output buffer) likely lived here — verify
   against the original ffmpeg.c. */
152 #if HAVE_TERMIOS_H
153 
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
156 static int restore_tty;
157 #endif
158 
159 #if HAVE_PTHREADS
160 static void free_input_threads(void);
161 #endif
162 
163 /* sub2video hack:
164  Convert subtitles to video with alpha to insert them in filter graphs.
165  This is a temporary solution until libavfilter gets real subtitles support.
166  */
167 
169 {
170  int ret;
171  AVFrame *frame = ist->sub2video.frame;
172 
173  av_frame_unref(frame);
174  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
177  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
178  return ret;
179  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
180  return 0;
181 }
182 
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184  AVSubtitleRect *r)
185 {
186  uint32_t *pal, *dst2;
187  uint8_t *src, *src2;
188  int x, y;
189 
190  if (r->type != SUBTITLE_BITMAP) {
191  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192  return;
193  }
194  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196  r->x, r->y, r->w, r->h, w, h
197  );
198  return;
199  }
200 
201  dst += r->y * dst_linesize + r->x * 4;
202  src = r->data[0];
203  pal = (uint32_t *)r->data[1];
204  for (y = 0; y < r->h; y++) {
205  dst2 = (uint32_t *)dst;
206  src2 = src;
207  for (x = 0; x < r->w; x++)
208  *(dst2++) = pal[*(src2++)];
209  dst += dst_linesize;
210  src += r->linesize[0];
211  }
212 }
213 
/* Send the current sub2video canvas frame, stamped with the given pts, to
   every filter fed by this input stream. Also records the pts as last_pts so
   sub2video_heartbeat() can tell whether the subtitle picture is up to date. */
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
215 {
216  AVFrame *frame = ist->sub2video.frame;
217  int i;
218 
219  av_assert1(frame->data[0]);
220  ist->sub2video.last_pts = frame->pts = pts;
221  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (doxygen lines 222-224, presumably the
   av_buffersrc_add_frame_flags() push into each filter) was lost in
   extraction — verify against the original ffmpeg.c. */
225 }
226 
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
228 {
229  AVFrame *frame = ist->sub2video.frame;
230  int8_t *dst;
231  int dst_linesize;
232  int num_rects, i;
233  int64_t pts, end_pts;
234 
235  if (!frame)
236  return;
237  if (sub) {
238  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241  AV_TIME_BASE_Q, ist->st->time_base);
242  num_rects = sub->num_rects;
243  } else {
244  pts = ist->sub2video.end_pts;
245  end_pts = INT64_MAX;
246  num_rects = 0;
247  }
248  if (sub2video_get_blank_frame(ist) < 0) {
250  "Impossible to get a blank canvas.\n");
251  return;
252  }
253  dst = frame->data [0];
254  dst_linesize = frame->linesize[0];
255  for (i = 0; i < num_rects; i++)
256  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257  sub2video_push_ref(ist, pts);
258  ist->sub2video.end_pts = end_pts;
259 }
260 
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
262 {
263  InputFile *infile = input_files[ist->file_index];
264  int i, j, nb_reqs;
265  int64_t pts2;
266 
267  /* When a frame is read from a file, examine all sub2video streams in
268  the same file and send the sub2video frame again. Otherwise, decoded
269  video frames could be accumulating in the filter graph while a filter
270  (possibly overlay) is desperately waiting for a subtitle frame. */
271  for (i = 0; i < infile->nb_streams; i++) {
272  InputStream *ist2 = input_streams[infile->ist_index + i];
273  if (!ist2->sub2video.frame)
274  continue;
275  /* subtitles seem to be usually muxed ahead of other streams;
276  if not, subtracting a larger time here is necessary */
277  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278  /* do not send the heartbeat frame if the subtitle is already ahead */
279  if (pts2 <= ist2->sub2video.last_pts)
280  continue;
281  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282  sub2video_update(ist2, NULL);
283  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
285  if (nb_reqs)
286  sub2video_push_ref(ist2, pts2);
287  }
288 }
289 
/* End-of-stream handling for a sub2video input: blank the canvas if a subtitle
   window is still open, then signal EOF to the attached filters. */
290 static void sub2video_flush(InputStream *ist)
291 {
292  int i;
293 
294  if (ist->sub2video.end_pts < INT64_MAX)
295  sub2video_update(ist, NULL);
296  for (i = 0; i < ist->nb_filters; i++)
/* NOTE(review): the loop body (doxygen line 297, presumably the
   av_buffersrc_add_frame() NULL/EOF push) was lost in extraction. */
298 }
299 
300 /* end of sub2video hack */
301 
/* Restore the saved terminal attributes; async-signal-safe (tcsetattr only),
   so it may be called from the signal handler path. No-op unless term_init()
   previously saved the tty state. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
309 
/* Public terminal teardown: flush/reset the logger, then restore the tty. */
310 void term_exit(void)
311 {
312  av_log(NULL, AV_LOG_QUIET, "%s", "");
/* NOTE(review): doxygen line 313 (the term_exit_sigsafe() call) was lost in
   extraction — verify against the original ffmpeg.c. */
314 }
315 
/* Signal/exit bookkeeping shared between the signal handler and main loop.
   volatile: written from signal context, read from normal context. */
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
318 static volatile int transcode_init_done = 0;
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
321 
/* Signal handler: record which signal arrived; after several repeated signals
   give up on graceful shutdown and hard-exit. Uses only async-signal-safe
   calls (write/strlen/exit). */
322 static void
/* NOTE(review): the signature line (doxygen 323, "sigterm_handler(int sig)")
   and lines 326-327 (presumably the received_nb_signals increment and
   term_exit_sigsafe() call) were lost in extraction. */
324 {
325  received_sigterm = sig;
328  if(received_nb_signals > 3) {
329  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330  strlen("Received > 3 system signals, hard exiting\n"));
331 
332  exit(123);
333  }
334 }
335 
336 #if HAVE_SETCONSOLECTRLHANDLER
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
338 {
339  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
340 
341  switch (fdwCtrlType)
342  {
343  case CTRL_C_EVENT:
344  case CTRL_BREAK_EVENT:
345  sigterm_handler(SIGINT);
346  return TRUE;
347 
348  case CTRL_CLOSE_EVENT:
349  case CTRL_LOGOFF_EVENT:
350  case CTRL_SHUTDOWN_EVENT:
351  sigterm_handler(SIGTERM);
352  /* Basically, with these 3 events, when we return from this method the
353  process is hard terminated, so stall as long as we need to
354  to try and let the main thread(s) clean up and gracefully terminate
355  (we have at most 5 seconds, but should be done far before that). */
356  while (!ffmpeg_exited) {
357  Sleep(0);
358  }
359  return TRUE;
360 
361  default:
362  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
363  return FALSE;
364  }
365 }
366 #endif
367 
/* Put the controlling terminal into raw-ish mode (so single keypresses can be
   read by read_key()) and install signal handlers for graceful shutdown. */
368 void term_init(void)
369 {
370 #if HAVE_TERMIOS_H
/* NOTE(review): doxygen line 371 was lost in extraction — upstream it is a
   guard like "if (!run_as_daemon && stdin_interaction) {" which the stray
   closing brace after the SIGQUIT line below matches. Verify before editing. */
372  struct termios tty;
373  if (tcgetattr (0, &tty) == 0) {
374  oldtty = tty;
375  restore_tty = 1;
376 
/* disable input translation/flow control; keep output post-processing */
377  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
378  |INLCR|IGNCR|ICRNL|IXON);
379  tty.c_oflag |= OPOST;
/* no echo, non-canonical (byte-at-a-time) input */
380  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
381  tty.c_cflag &= ~(CSIZE|PARENB);
382  tty.c_cflag |= CS8;
/* block until 1 byte is available, no inter-byte timeout */
383  tty.c_cc[VMIN] = 1;
384  tty.c_cc[VTIME] = 0;
385 
386  tcsetattr (0, TCSANOW, &tty);
387  }
388  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
389  }
390 #endif
391 
392  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
393  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
394 #ifdef SIGXCPU
395  signal(SIGXCPU, sigterm_handler);
396 #endif
397 #if HAVE_SETCONSOLECTRLHANDLER
398  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
399 #endif
400 }
401 
402 /* read a key without blocking */
/**
 * Poll for a single keypress without blocking.
 *
 * POSIX: a zero-timeout select() on stdin followed by a 1-byte read.
 * Windows: PeekNamedPipe() when stdin is a pipe (GUI launchers), otherwise
 * kbhit()/getch() on a real console.
 *
 * @return the key byte, or a negative value (including -1) when no key is
 *         available or stdin is closed
 */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec  = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        /* one byte read: return it; otherwise propagate read()'s result */
        return n == 1 ? ch : n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode() fails when stdin is not a console */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
453 
/* AVIOInterruptCallback: tells libavformat to abort blocking I/O when a
   termination signal was received. */
454 static int decode_interrupt_cb(void *ctx)
455 {
/* NOTE(review): the return expression (doxygen line 456, upstream
   "return received_nb_signals > transcode_init_done;") was lost in
   extraction — verify against the original ffmpeg.c. */
457 }
458 
460 
/* Global teardown registered as the exit handler: frees filter graphs, closes
   and frees all output/input files and streams, drains queues, closes the
   vstats file, and logs the final status. Several doxygen lines (472, 489,
   494, 500, 514-534, 541, 555-564, 571, 575, 584) were lost in extraction;
   additional frees/closes likely lived there — verify against upstream. */
461 static void ffmpeg_cleanup(int ret)
462 {
463  int i, j;
464 
465  if (do_benchmark) {
466  int maxrss = getmaxrss() / 1024;
467  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
468  }
469 
/* free every filter graph together with its named inputs/outputs */
470  for (i = 0; i < nb_filtergraphs; i++) {
471  FilterGraph *fg = filtergraphs[i];
473  for (j = 0; j < fg->nb_inputs; j++) {
474  av_freep(&fg->inputs[j]->name);
475  av_freep(&fg->inputs[j]);
476  }
477  av_freep(&fg->inputs);
478  for (j = 0; j < fg->nb_outputs; j++) {
479  av_freep(&fg->outputs[j]->name);
480  av_freep(&fg->outputs[j]);
481  }
482  av_freep(&fg->outputs);
483  av_freep(&fg->graph_desc);
484 
485  av_freep(&filtergraphs[i]);
486  }
487  av_freep(&filtergraphs);
488 
490 
491  /* close files */
492  for (i = 0; i < nb_output_files; i++) {
493  OutputFile *of = output_files[i];
495  if (!of)
496  continue;
497  s = of->ctx;
498  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
499  avio_closep(&s->pb);
501  av_dict_free(&of->opts);
502 
503  av_freep(&output_files[i]);
504  }
/* release per-output-stream state (bsf chain, frames, dicts, queues) */
505  for (i = 0; i < nb_output_streams; i++) {
506  OutputStream *ost = output_streams[i];
507 
508  if (!ost)
509  continue;
510 
511  for (j = 0; j < ost->nb_bitstream_filters; j++)
512  av_bsf_free(&ost->bsf_ctx[j]);
513  av_freep(&ost->bsf_ctx);
515 
517  av_frame_free(&ost->last_frame);
518  av_dict_free(&ost->encoder_opts);
519 
520  av_parser_close(ost->parser);
522 
523  av_freep(&ost->forced_keyframes);
525  av_freep(&ost->avfilter);
526  av_freep(&ost->logfile_prefix);
527 
529  ost->audio_channels_mapped = 0;
530 
531  av_dict_free(&ost->sws_dict);
532 
535 
/* drain any packets still buffered for the muxer */
536  while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
537  AVPacket pkt;
538  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
539  av_packet_unref(&pkt);
540  }
542 
543  av_freep(&output_streams[i]);
544  }
545 #if HAVE_PTHREADS
546  free_input_threads();
547 #endif
548  for (i = 0; i < nb_input_files; i++) {
549  avformat_close_input(&input_files[i]->ctx);
550  av_freep(&input_files[i]);
551  }
552  for (i = 0; i < nb_input_streams; i++) {
553  InputStream *ist = input_streams[i];
554 
557  av_dict_free(&ist->decoder_opts);
560  av_freep(&ist->filters);
561  av_freep(&ist->hwaccel_device);
562  av_freep(&ist->dts_buffer);
563 
565 
566  av_freep(&input_streams[i]);
567  }
568 
569  if (vstats_file) {
570  if (fclose(vstats_file))
572  "Error closing vstats file, loss of information possible: %s\n",
573  av_err2str(AVERROR(errno)));
574  }
576 
577  av_freep(&input_streams);
578  av_freep(&input_files);
579  av_freep(&output_streams);
580  av_freep(&output_files);
581 
582  uninit_opts();
583 
585 
/* received_sigterm takes precedence over an error return for the final log */
586  if (received_sigterm) {
587  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
588  (int) received_sigterm);
589  } else if (ret && transcode_init_done) {
590  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
591  }
592  term_exit();
593  ffmpeg_exited = 1;
594 }
595 
/* Remove from dictionary *a every key that also appears in b (used to strip
   consumed options). NOTE(review): the signature line (doxygen 596, upstream
   "static void remove_avoptions(AVDictionary **a, AVDictionary *b)") and the
   loop body (601, the av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE) call)
   were lost in extraction. */
597 {
598  AVDictionaryEntry *t = NULL;
599 
600  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
602  }
603 }
604 
/* Abort if any option in m was not consumed by the libav* open call.
   NOTE(review): the signature line (doxygen 605/607, upstream
   "static void assert_avoptions(AVDictionary *m)") was lost in extraction. */
606 {
/* NOTE(review): doxygen line 607 (the AVDictionaryEntry *t declaration)
   was also lost. */
608  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
609  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
610  exit_program(1);
611  }
612 }
613 
/* Terminate after an experimental codec was selected without the required
   strictness level; the caller logs the details before calling this. Both
   parameters exist for the call-site's convenience and are unused here. */
614 static void abort_codec_experimental(AVCodec *c, int encoder)
615 {
616  exit_program(1);
617 }
618 
619 static void update_benchmark(const char *fmt, ...)
620 {
621  if (do_benchmark_all) {
622  int64_t t = getutime();
623  va_list va;
624  char buf[1024];
625 
626  if (fmt) {
627  va_start(va, fmt);
628  vsnprintf(buf, sizeof(buf), fmt, va);
629  va_end(va);
630  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
631  }
632  current_time = t;
633  }
634 }
635 
636 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
637 {
638  int i;
639  for (i = 0; i < nb_output_streams; i++) {
640  OutputStream *ost2 = output_streams[i];
641  ost2->finished |= ost == ost2 ? this_stream : others;
642  }
643 }
644 
/* Hand one encoded/copied packet to the muxer for stream ost of file of:
   buffers it while the muxer header is unwritten, enforces -frames limits,
   extracts video quality/error side data, sanitizes timestamps (DTS<=PTS,
   monotonic DTS), then interleaved-writes it. The packet is always consumed.
   NOTE(review): the signature line (doxygen 645, upstream
   "static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)")
   plus lines 658, 672-673, 692, 724, 757 and 768 were lost in extraction. */
646 {
647  AVFormatContext *s = of->ctx;
648  AVStream *st = ost->st;
649  int ret;
650 
651  if (!of->header_written) {
652  AVPacket tmp_pkt;
653  /* the muxer is not initialized yet, buffer the packet */
654  if (!av_fifo_space(ost->muxing_queue)) {
655  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
656  ost->max_muxing_queue_size);
657  if (new_size <= av_fifo_size(ost->muxing_queue)) {
659  "Too many packets buffered for output stream %d:%d.\n",
660  ost->file_index, ost->st->index);
661  exit_program(1);
662  }
663  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
664  if (ret < 0)
665  exit_program(1);
666  }
667  av_packet_move_ref(&tmp_pkt, pkt);
668  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
669  return;
670  }
671 
/* NOTE(review): lines 672-673 (the -vsync drop / AVFMT_NOTIMESTAMPS guard
   preceding this timestamp reset) were lost in extraction. */
674  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
675 
676  /*
677  * Audio encoders may split the packets -- #frames in != #packets out.
678  * But there is no reordering, so we can limit the number of output packets
679  * by simply dropping them here.
680  * Counting encoded video frames needs to be done separately because of
681  * reordering, see do_video_out()
682  */
683  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
684  if (ost->frame_number >= ost->max_frames) {
685  av_packet_unref(pkt);
686  return;
687  }
688  ost->frame_number++;
689  }
690  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
691  int i;
/* NOTE(review): line 692 (the av_packet_get_side_data(pkt,
   AV_PKT_DATA_QUALITY_STATS, ...) call assigning `sd`) was lost. */
693  NULL);
694  ost->quality = sd ? AV_RL32(sd) : -1;
695  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
696 
697  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
698  if (sd && i < sd[5])
699  ost->error[i] = AV_RL64(sd + 8 + 8*i);
700  else
701  ost->error[i] = -1;
702  }
703 
704  if (ost->frame_rate.num && ost->is_cfr) {
705  if (pkt->duration > 0)
706  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
707  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
708  ost->st->time_base);
709  }
710  }
711 
712  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
713  if (pkt->dts != AV_NOPTS_VALUE &&
714  pkt->pts != AV_NOPTS_VALUE &&
715  pkt->dts > pkt->pts) {
716  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
717  pkt->dts, pkt->pts,
718  ost->file_index, ost->st->index);
/* replace both by the median of {pts, dts, last_mux_dts+1} */
719  pkt->pts =
720  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
721  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
722  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
723  }
/* NOTE(review): line 724 (the codec-type condition opening this
   monotonic-DTS check) was lost in extraction. */
725  pkt->dts != AV_NOPTS_VALUE &&
726  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
727  ost->last_mux_dts != AV_NOPTS_VALUE) {
728  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
729  if (pkt->dts < max) {
730  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
731  av_log(s, loglevel, "Non-monotonous DTS in output stream "
732  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
733  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
734  if (exit_on_error) {
735  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
736  exit_program(1);
737  }
738  av_log(s, loglevel, "changing to %"PRId64". This may result "
739  "in incorrect timestamps in the output file.\n",
740  max);
741  if (pkt->pts >= pkt->dts)
742  pkt->pts = FFMAX(pkt->pts, max);
743  pkt->dts = max;
744  }
745  }
746  }
747  ost->last_mux_dts = pkt->dts;
748 
749  ost->data_size += pkt->size;
750  ost->packets_written++;
751 
752  pkt->stream_index = ost->index;
753 
754  if (debug_ts) {
755  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
756  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
758  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
759  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
760  pkt->size
761  );
762  }
763 
764  ret = av_interleaved_write_frame(s, pkt);
765  if (ret < 0) {
766  print_error("av_interleaved_write_frame()", ret);
767  main_return_code = 1;
769  }
770  av_packet_unref(pkt);
771 }
772 
774 {
775  OutputFile *of = output_files[ost->file_index];
776 
777  ost->finished |= ENCODER_FINISHED;
778  if (of->shortest) {
779  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
780  of->recording_time = FFMIN(of->recording_time, end);
781  }
782 }
783 
785 {
786  int ret = 0;
787 
788  /* apply the output bitstream filters, if any */
789  if (ost->nb_bitstream_filters) {
790  int idx;
791 
792  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
793  if (ret < 0)
794  goto finish;
795 
796  idx = 1;
797  while (idx) {
798  /* get a packet from the previous filter up the chain */
799  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
800  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
801  * the api states this shouldn't happen after init(). Propagate it here to the
802  * muxer and to the next filters in the chain to workaround this.
803  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
804  * par_out->extradata and adapt muxers accordingly to get rid of this. */
805  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
806  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
807  if (ret < 0)
808  goto finish;
809  ost->bsf_extradata_updated[idx - 1] |= 1;
810  }
811  if (ret == AVERROR(EAGAIN)) {
812  ret = 0;
813  idx--;
814  continue;
815  } else if (ret < 0)
816  goto finish;
817 
818  /* send it to the next filter down the chain or to the muxer */
819  if (idx < ost->nb_bitstream_filters) {
820  /* HACK/FIXME! - See above */
821  if (!(ost->bsf_extradata_updated[idx] & 2)) {
822  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
823  if (ret < 0)
824  goto finish;
825  ost->bsf_extradata_updated[idx] |= 2;
826  }
827  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
828  if (ret < 0)
829  goto finish;
830  idx++;
831  } else
832  write_packet(of, pkt, ost);
833  }
834  } else
835  write_packet(of, pkt, ost);
836 
837 finish:
838  if (ret < 0 && ret != AVERROR_EOF) {
839  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
840  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
841  if(exit_on_error)
842  exit_program(1);
843  }
844 }
845 
847 {
848  OutputFile *of = output_files[ost->file_index];
849 
850  if (of->recording_time != INT64_MAX &&
852  AV_TIME_BASE_Q) >= 0) {
853  close_output_stream(ost);
854  return 0;
855  }
856  return 1;
857 }
858 
/* Encode one audio frame with the send/receive API and mux every packet the
   encoder produces: assigns pts from sync_opts when the frame has none,
   advances sync_opts by nb_samples, rescales packet timestamps to the stream
   time base, and fatals out on any encoder error. NOTE(review): doxygen line
   880 between the assert and the debug_ts block was lost in extraction. */
859 static void do_audio_out(OutputFile *of, OutputStream *ost,
860  AVFrame *frame)
861 {
862  AVCodecContext *enc = ost->enc_ctx;
863  AVPacket pkt;
864  int ret;
865 
866  av_init_packet(&pkt);
867  pkt.data = NULL;
868  pkt.size = 0;
869 
870  if (!check_recording_time(ost))
871  return;
872 
/* without a pts (or with negative -async), continue from our own counter */
873  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
874  frame->pts = ost->sync_opts;
875  ost->sync_opts = frame->pts + frame->nb_samples;
876  ost->samples_encoded += frame->nb_samples;
877  ost->frames_encoded++;
878 
879  av_assert0(pkt.size || !pkt.data);
881  if (debug_ts) {
882  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
883  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
884  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
885  enc->time_base.num, enc->time_base.den);
886  }
887 
888  ret = avcodec_send_frame(enc, frame);
889  if (ret < 0)
890  goto error;
891 
/* drain all packets the encoder has ready; EAGAIN means "feed more frames" */
892  while (1) {
893  ret = avcodec_receive_packet(enc, &pkt);
894  if (ret == AVERROR(EAGAIN))
895  break;
896  if (ret < 0)
897  goto error;
898 
899  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
900 
901  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
902 
903  if (debug_ts) {
904  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
905  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
906  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
907  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
908  }
909 
910  output_packet(of, &pkt, ost);
911  }
912 
913  return;
914 error:
915  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
916  exit_program(1);
917 }
918 
/* Encode one subtitle and mux the result. DVB subtitles are encoded twice
   (one packet to draw, one with num_rects=0 to clear); timestamps are shifted
   by the output file's start time so -ss/-t behave consistently.
   NOTE(review): doxygen lines 949 (the DVB codec-id condition before nb = 2)
   and 968 (between the pts adjustment and start_display_time reset) were lost
   in extraction — verify against upstream. */
919 static void do_subtitle_out(OutputFile *of,
920  OutputStream *ost,
921  AVSubtitle *sub)
922 {
923  int subtitle_out_max_size = 1024 * 1024;
924  int subtitle_out_size, nb, i;
925  AVCodecContext *enc;
926  AVPacket pkt;
927  int64_t pts;
928 
929  if (sub->pts == AV_NOPTS_VALUE) {
930  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
931  if (exit_on_error)
932  exit_program(1);
933  return;
934  }
935 
936  enc = ost->enc_ctx;
937 
/* lazily allocate the shared encode buffer (freed at program exit) */
938  if (!subtitle_out) {
939  subtitle_out = av_malloc(subtitle_out_max_size);
940  if (!subtitle_out) {
941  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
942  exit_program(1);
943  }
944  }
945 
946  /* Note: DVB subtitle need one packet to draw them and one other
947  packet to clear them */
948  /* XXX: signal it in the codec context ? */
950  nb = 2;
951  else
952  nb = 1;
953 
954  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
955  pts = sub->pts;
956  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
957  pts -= output_files[ost->file_index]->start_time;
958  for (i = 0; i < nb; i++) {
959  unsigned save_num_rects = sub->num_rects;
960 
961  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
962  if (!check_recording_time(ost))
963  return;
964 
965  sub->pts = pts;
966  // start_display_time is required to be 0
967  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
969  sub->start_display_time = 0;
/* second (clear) packet carries no rectangles */
970  if (i == 1)
971  sub->num_rects = 0;
972 
973  ost->frames_encoded++;
974 
975  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
976  subtitle_out_max_size, sub);
977  if (i == 1)
978  sub->num_rects = save_num_rects;
979  if (subtitle_out_size < 0) {
980  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
981  exit_program(1);
982  }
983 
984  av_init_packet(&pkt);
985  pkt.data = subtitle_out;
986  pkt.size = subtitle_out_size;
987  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
988  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
989  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
990  /* XXX: the pts correction is handled here. Maybe handling
991  it in the codec would be better */
/* 90 = 90kHz ticks per millisecond of display time */
992  if (i == 0)
993  pkt.pts += 90 * sub->start_display_time;
994  else
995  pkt.pts += 90 * sub->end_display_time;
996  }
997  pkt.dts = pkt.pts;
998  output_packet(of, &pkt, ost);
999  }
1000 }
1001 
1002 static void do_video_out(OutputFile *of,
1003  OutputStream *ost,
1004  AVFrame *next_picture,
1005  double sync_ipts)
1006 {
1007  int ret, format_video_sync;
1008  AVPacket pkt;
1009  AVCodecContext *enc = ost->enc_ctx;
1010  AVCodecParameters *mux_par = ost->st->codecpar;
1011  int nb_frames, nb0_frames, i;
1012  double delta, delta0;
1013  double duration = 0;
1014  int frame_size = 0;
1015  InputStream *ist = NULL;
1017 
1018  if (ost->source_index >= 0)
1019  ist = input_streams[ost->source_index];
1020 
1021  if (filter->inputs[0]->frame_rate.num > 0 &&
1022  filter->inputs[0]->frame_rate.den > 0)
1023  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1024 
1025  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1026  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1027 
1028  if (!ost->filters_script &&
1029  !ost->filters &&
1030  next_picture &&
1031  ist &&
1032  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1033  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1034  }
1035 
1036  if (!next_picture) {
1037  //end, flushing
1038  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1039  ost->last_nb0_frames[1],
1040  ost->last_nb0_frames[2]);
1041  } else {
1042  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1043  delta = delta0 + duration;
1044 
1045  /* by default, we output a single frame */
1046  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1047  nb_frames = 1;
1048 
1049  format_video_sync = video_sync_method;
1050  if (format_video_sync == VSYNC_AUTO) {
1051  if(!strcmp(of->ctx->oformat->name, "avi")) {
1052  format_video_sync = VSYNC_VFR;
1053  } else
1054  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1055  if ( ist
1056  && format_video_sync == VSYNC_CFR
1057  && input_files[ist->file_index]->ctx->nb_streams == 1
1058  && input_files[ist->file_index]->input_ts_offset == 0) {
1059  format_video_sync = VSYNC_VSCFR;
1060  }
1061  if (format_video_sync == VSYNC_CFR && copy_ts) {
1062  format_video_sync = VSYNC_VSCFR;
1063  }
1064  }
1065  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1066 
1067  if (delta0 < 0 &&
1068  delta > 0 &&
1069  format_video_sync != VSYNC_PASSTHROUGH &&
1070  format_video_sync != VSYNC_DROP) {
1071  if (delta0 < -0.6) {
1072  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1073  } else
1074  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1075  sync_ipts = ost->sync_opts;
1076  duration += delta0;
1077  delta0 = 0;
1078  }
1079 
1080  switch (format_video_sync) {
1081  case VSYNC_VSCFR:
1082  if (ost->frame_number == 0 && delta0 >= 0.5) {
1083  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1084  delta = duration;
1085  delta0 = 0;
1086  ost->sync_opts = lrint(sync_ipts);
1087  }
1088  case VSYNC_CFR:
1089  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1090  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1091  nb_frames = 0;
1092  } else if (delta < -1.1)
1093  nb_frames = 0;
1094  else if (delta > 1.1) {
1095  nb_frames = lrintf(delta);
1096  if (delta0 > 1.1)
1097  nb0_frames = lrintf(delta0 - 0.6);
1098  }
1099  break;
1100  case VSYNC_VFR:
1101  if (delta <= -0.6)
1102  nb_frames = 0;
1103  else if (delta > 0.6)
1104  ost->sync_opts = lrint(sync_ipts);
1105  break;
1106  case VSYNC_DROP:
1107  case VSYNC_PASSTHROUGH:
1108  ost->sync_opts = lrint(sync_ipts);
1109  break;
1110  default:
1111  av_assert0(0);
1112  }
1113  }
1114 
1115  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1116  nb0_frames = FFMIN(nb0_frames, nb_frames);
1117 
1118  memmove(ost->last_nb0_frames + 1,
1119  ost->last_nb0_frames,
1120  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1121  ost->last_nb0_frames[0] = nb0_frames;
1122 
1123  if (nb0_frames == 0 && ost->last_dropped) {
1124  nb_frames_drop++;
1126  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1127  ost->frame_number, ost->st->index, ost->last_frame->pts);
1128  }
1129  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1130  if (nb_frames > dts_error_threshold * 30) {
1131  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1132  nb_frames_drop++;
1133  return;
1134  }
1135  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1136  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1137  }
1138  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1139 
1140  /* duplicates frame if needed */
1141  for (i = 0; i < nb_frames; i++) {
1142  AVFrame *in_picture;
1143  av_init_packet(&pkt);
1144  pkt.data = NULL;
1145  pkt.size = 0;
1146 
1147  if (i < nb0_frames && ost->last_frame) {
1148  in_picture = ost->last_frame;
1149  } else
1150  in_picture = next_picture;
1151 
1152  if (!in_picture)
1153  return;
1154 
1155  in_picture->pts = ost->sync_opts;
1156 
1157 #if 1
1158  if (!check_recording_time(ost))
1159 #else
1160  if (ost->frame_number >= ost->max_frames)
1161 #endif
1162  return;
1163 
1164 #if FF_API_LAVF_FMT_RAWPICTURE
1165  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1166  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1167  /* raw pictures are written as AVPicture structure to
1168  avoid any copies. We support temporarily the older
1169  method. */
1170  if (in_picture->interlaced_frame)
1171  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1172  else
1173  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1174  pkt.data = (uint8_t *)in_picture;
1175  pkt.size = sizeof(AVPicture);
1176  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1177  pkt.flags |= AV_PKT_FLAG_KEY;
1178 
1179  output_packet(of, &pkt, ost);
1180  } else
1181 #endif
1182  {
1183  int forced_keyframe = 0;
1184  double pts_time;
1185 
1187  ost->top_field_first >= 0)
1188  in_picture->top_field_first = !!ost->top_field_first;
1189 
1190  if (in_picture->interlaced_frame) {
1191  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1192  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1193  else
1194  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1195  } else
1196  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1197 
1198  in_picture->quality = enc->global_quality;
1199  in_picture->pict_type = 0;
1200 
1201  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1202  in_picture->pts * av_q2d(enc->time_base) : NAN;
1203  if (ost->forced_kf_index < ost->forced_kf_count &&
1204  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1205  ost->forced_kf_index++;
1206  forced_keyframe = 1;
1207  } else if (ost->forced_keyframes_pexpr) {
1208  double res;
1209  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1212  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1218  res);
1219  if (res) {
1220  forced_keyframe = 1;
1226  }
1227 
1229  } else if ( ost->forced_keyframes
1230  && !strncmp(ost->forced_keyframes, "source", 6)
1231  && in_picture->key_frame==1) {
1232  forced_keyframe = 1;
1233  }
1234 
1235  if (forced_keyframe) {
1236  in_picture->pict_type = AV_PICTURE_TYPE_I;
1237  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1238  }
1239 
1241  if (debug_ts) {
1242  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1243  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1244  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1245  enc->time_base.num, enc->time_base.den);
1246  }
1247 
1248  ost->frames_encoded++;
1249 
1250  ret = avcodec_send_frame(enc, in_picture);
1251  if (ret < 0)
1252  goto error;
1253 
1254  while (1) {
1255  ret = avcodec_receive_packet(enc, &pkt);
1256  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1257  if (ret == AVERROR(EAGAIN))
1258  break;
1259  if (ret < 0)
1260  goto error;
1261 
1262  if (debug_ts) {
1263  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1264  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1265  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1266  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1267  }
1268 
1269  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1270  pkt.pts = ost->sync_opts;
1271 
1272  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1273 
1274  if (debug_ts) {
1275  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1276  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1277  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1278  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1279  }
1280 
1281  frame_size = pkt.size;
1282  output_packet(of, &pkt, ost);
1283 
1284  /* if two pass, output log */
1285  if (ost->logfile && enc->stats_out) {
1286  fprintf(ost->logfile, "%s", enc->stats_out);
1287  }
1288  }
1289  }
1290  ost->sync_opts++;
1291  /*
1292  * For video, number of frames in == number of packets out.
1293  * But there may be reordering, so we can't throw away frames on encoder
1294  * flush, we need to limit them here, before they go into encoder.
1295  */
1296  ost->frame_number++;
1297 
1298  if (vstats_filename && frame_size)
1299  do_video_stats(ost, frame_size);
1300  }
1301 
1302  if (!ost->last_frame)
1303  ost->last_frame = av_frame_alloc();
1304  av_frame_unref(ost->last_frame);
1305  if (next_picture && ost->last_frame)
1306  av_frame_ref(ost->last_frame, next_picture);
1307  else
1308  av_frame_free(&ost->last_frame);
1309 
1310  return;
1311 error:
1312  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1313  exit_program(1);
1314 }
1315 
1316 static double psnr(double d)
1317 {
1318  return -10.0 * log10(d);
1319 }
1320 
/* Append one line of per-frame video statistics to the -vstats log file.
 * NOTE(review): the function signature line is missing from this excerpt;
 * presumably `static void do_video_stats(OutputStream *ost, int frame_size)`
 * — confirm against the full source. */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* quality is stored in lambda units; divide to get a QP-like value */
        fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);

        /* PSNR is only meaningful when the encoder was asked to collect error stats */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01) /* clamp to avoid division by ~0 in avg_bitrate below */
            ti1 = 0.01;

        /* instantaneous bitrate assumes one frame per encoder time-base tick */
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
                (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1358 
/* Mark an output stream as finished; with -shortest, finishing one stream
 * finishes every stream of the same output file.
 * NOTE(review): the signature line and one statement (original line 1364,
 * presumably `ost->finished = ENCODER_FINISHED | MUXER_FINISHED;`) are
 * missing from this excerpt — confirm against the full source. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;


    if (of->shortest) {
        /* -shortest: stop all sibling streams of this output file too */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1371 
1372 /**
1373  * Get and encode new output from any of the filtergraphs, without causing
1374  * activity.
1375  *
1376  * @return 0 for success, <0 for severe errors
1377  */
/* Drain every output stream's buffersink and hand the frames to the
 * audio/video encoders. With flush!=0, EOF on a video sink triggers a final
 * do_video_out(NULL) call to flush frame duplication state.
 * NOTE(review): several lines are missing from this excerpt (the `filter`
 * declaration around original line 1387, the buffersink flag argument at
 * 1403, and two av_log(...) opening lines at 1406/1455) — confirm against
 * the full source.
 * Returns 0 on success, <0 only for severe errors (ENOMEM). */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        if (!ost->filter)
            continue;
        filter = ost->filter->filter;

        /* lazily allocate the per-stream reusable frame */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                        "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* final flush: let do_video_out emit any pending duplicated frames */
                    if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                /* stream already done: discard the frame but keep draining */
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational tb = enc->time_base;
                /* widen the time base so float_pts keeps extra fractional precision */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                /* the frame itself carries the pts rescaled to the encoder time base */
                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (filter->inputs[0]->type) {
            case AVMEDIA_TYPE_VIDEO:
                /* propagate SAR from the filter graph unless forced via -aspect */
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                        "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1472 
/* Print the end-of-run summary: global per-type byte counts, muxing
 * overhead, and verbose per-input/per-output stream statistics.
 * NOTE(review): original lines 1492-1493 (the two-pass flag check that can
 * clear pass1_used) are missing from this excerpt — confirm against the
 * full source. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;          /* -1 == "unknown" muxing overhead */
    int i, j;
    int pass1_used = 1;

    /* accumulate written bytes per media type across all output streams */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
            pass1_used = 0;
    }

    /* overhead = container bytes beyond the raw coded payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->filename);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->filename);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* nothing was written at all: warn, and hint at trimming options unless
       this was a deliberate first pass of two-pass encoding */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1589 
/* Periodic (and final) progress report: builds the familiar one-line
 * "frame= ... fps= ... bitrate= ..." status in buf, and a machine-readable
 * key=value block in buf_script for -progress consumers. Throttled to one
 * update per 500ms unless this is the last report.
 * NOTE(review): original lines 1699 (AV_NOPTS_VALUE guard before the pts
 * FFMAX), 1740/1742 (dup/drop condition and arguments) and 1773 (av_log
 * opening line) are missing from this excerpt — confirm against the full
 * source. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    char buf[1024];
    AVBPrint buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;   /* persists across calls for throttling */
    static int qp_histogram[52];
    int hours, mins, secs, us;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* rate-limit intermediate reports to one every 500ms */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;  /* elapsed wall time in seconds */


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    vid = 0;   /* set once the first video stream has been reported */
    av_bprint_init(&buf_script, 0, 1);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* additional video streams only contribute a q= field */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            /* fps < 9.95 selects one decimal digit via the "%3.*f" precision */
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.1f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* log2-compressed per-QP counts, one hex digit each */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;  /* chroma planes are quarter size (4:2:0 assumption) */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split pts (in AV_TIME_BASE units) into h:m:s.us for display */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=N/A time=");
    else                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
                                 "size=%8.0fkB time=", total_size / 1024.0);
    if (pts < 0)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "%02d:%02d:%02d.%02d ", hours, mins, secs,
             (100 * us) / AV_TIME_BASE);

    if (bitrate < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
    av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
               hours, mins, secs, us);

        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';  /* '\r' keeps one updating line */
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf, end);

        fflush(stderr);
    }

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1781 
/* At end of input: send a NULL frame to every active encoder and drain all
 * delayed packets until each encoder signals AVERROR_EOF.
 * NOTE(review): original lines 1802 (a media-type guard whose `continue;`
 * survives below), 1828 (presumably update_benchmark(NULL);) and 1851 (the
 * condition guarding do_video_stats) are missing from this excerpt —
 * confirm against the full source. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        /* raw PCM-style audio encoders have no delay, nothing to flush */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
#if FF_API_LAVF_FMT_RAWPICTURE
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
            continue;
#endif

            continue;

        /* NULL frame == enter draining mode */
        avcodec_send_frame(enc, NULL);

        for (;;) {
            const char *desc = NULL;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            if (1) {
                AVPacket pkt;
                int pkt_size;
                av_init_packet(&pkt);
                pkt.data = NULL;
                pkt.size = 0;

                ret = avcodec_receive_packet(enc, &pkt);
                update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
                if (ret < 0 && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
                /* two-pass log data is still produced while draining */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (ret == AVERROR_EOF) {
                    stop_encoding = 1;
                    break;
                }
                if (ost->finished & MUXER_FINISHED) {
                    /* muxer already closed: drop the packet, keep draining */
                    av_packet_unref(&pkt);
                    continue;
                }
                av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
                pkt_size = pkt.size;
                output_packet(of, &pkt, ost);
                    do_video_stats(ost, pkt_size);
            }

            if (stop_encoding)
                break;
        }
    }
}
1861 
1862 /*
1863  * Check whether a packet from ist should be written into ost at this time
1864  */
/* Decide whether a packet from this input stream should be written to this
 * output stream right now (stream-copy path). Returns 1 to write, 0 to skip.
 * NOTE(review): the signature line is missing from this excerpt; presumably
 * `static int check_output_constraints(InputStream *ist, OutputStream *ost)`
 * — confirm against the full source. */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    /* this output stream is fed by a different input stream */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* drop packets before the requested output start time (-ss on output) */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1881 
/* Stream-copy path: forward an input packet to the output muxer with
 * timestamps rescaled to the output stream time base, honoring start/
 * recording-time limits and running the optional bitstream parser change.
 * NOTE(review): original lines 1894 (second half of the keyframe-skip
 * condition), 1939/1943 (audio duration computation and the middle
 * arguments of av_rescale_delta) and 1958 (last argument of
 * av_parser_change) are missing from this excerpt — confirm against the
 * full source. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPicture pict;
    AVPacket opkt;

    av_init_packet(&opkt);

    /* do not start a copied stream on a non-keyframe */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        return;

    /* drop packets before the effective start point unless -copypriorss */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* stop when the output's -t limit is reached */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* likewise for a per-input-file -t limit */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* fall back to the input stream's running dts when the packet has none */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    /* audio: rescale with duration-aware rounding to avoid timestamp drift */
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               ost->st->time_base) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;
    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codecpar->codec_id != AV_CODEC_ID_H264
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
       && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
       ) {
        int ret = av_parser_change(ost->parser, ost->parser_avctx,
                                   &opkt.data, &opkt.size,
                                   pkt->data, pkt->size,
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        if (ret) {
            /* parser allocated a new buffer: give the packet ownership of it */
            opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
            if (!opkt.buf)
                exit_program(1);
        }
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }
    av_copy_packet_side_data(&opkt, pkt);

#if FF_API_LAVF_FMT_RAWPICTURE
    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
        (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
        /* store AVPicture in AVPacket, as expected by the output format */
        int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
                   av_err2str(ret));
            exit_program(1);
        }
        opkt.data = (uint8_t *)&pict;
        opkt.size = sizeof(AVPicture);
        opkt.flags |= AV_PKT_FLAG_KEY;
    }
#endif

    output_packet(of, &opkt, ost);
}
1994 
/* If the decoder reported no channel layout, try to pick a default one for
 * the channel count and warn the user. Returns 1 on success (or if a layout
 * was already set), 0 when no layout could be guessed.
 * NOTE(review): the signature line is missing from this excerpt (presumably
 * `static int guess_input_channel_layout(InputStream *ist)`), as is
 * original line 2004, which presumably assigned
 * `dec->channel_layout = av_get_default_channel_layout(dec->channels);` —
 * confirm against the full source. */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* refuse to guess for very high channel counts (-guess_layout_max) */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2014 
/* Track decode success/failure counts and, with -xerror, abort on decode
 * errors or corrupt decoded frames.
 * NOTE(review): original line 2024 — the inner `if` that tests the decoded
 * frame's corruption flags before the fatal log — is missing from this
 * excerpt, which is why an apparently unmatched brace follows; confirm
 * against the full source. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
            exit_program(1);
        }
    }
}
2030 
2031 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2032 // There is the following difference: if you got a frame, you must call
2033 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2034 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2035 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2036 {
2037  int ret;
2038 
2039  *got_frame = 0;
2040 
2041  if (pkt) {
2042  ret = avcodec_send_packet(avctx, pkt);
2043  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2044  // decoded frames with avcodec_receive_frame() until done.
2045  if (ret < 0 && ret != AVERROR_EOF)
2046  return ret;
2047  }
2048 
2049  ret = avcodec_receive_frame(avctx, frame);
2050  if (ret < 0 && ret != AVERROR(EAGAIN))
2051  return ret;
2052  if (ret >= 0)
2053  *got_frame = 1;
2054 
2055  return 0;
2056 }
2057 
/* Decode one audio packet, update timing bookkeeping, reconfigure filter
 * graphs if the sample format/rate/channel layout changed, and feed the
 * decoded frame into every filter this input stream drives.
 * NOTE(review): several lines are missing from this excerpt (original 2071,
 * presumably update_benchmark(NULL); 2114/2118/2121, parts of the
 * layout-string call and the resample-changed av_log; 2164, the
 * av_buffersrc flag argument) — confirm against the full source.
 * Returns the decode() result, or a filter error if feeding filters failed. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame, *f;
    AVCodecContext *avctx = ist->dec_ctx;
    int i, ret, err = 0, resample_changed;
    AVRational decoded_frame_tb;

    /* lazily allocate the reusable decode/filter frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    /* EOF is a normal end condition, not a decode error */
    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* detect a mid-stream change in audio parameters */
    resample_changed = ist->resample_sample_fmt     != decoded_frame->format         ||
                       ist->resample_channels       != avctx->channels               ||
                       ist->resample_channel_layout != decoded_frame->channel_layout ||
                       ist->resample_sample_rate    != decoded_frame->sample_rate;
    if (resample_changed) {
        char layout1[64], layout2[64];

        if (!guess_input_channel_layout(ist)) {
            av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
                   "layout for Input Stream #%d.%d\n", ist->file_index,
                   ist->st->index);
            exit_program(1);
        }
        decoded_frame->channel_layout = avctx->channel_layout;

        av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
        av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
                                     decoded_frame->channel_layout);

               "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
               ist->file_index, ist->st->index,
               ist->resample_channels, layout1,
               decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
               avctx->channels, layout2);

        /* remember the new parameters so the next change is detected */
        ist->resample_sample_fmt     = decoded_frame->format;
        ist->resample_sample_rate    = decoded_frame->sample_rate;
        ist->resample_channel_layout = decoded_frame->channel_layout;
        ist->resample_channels       = avctx->channels;

        /* rebuild every filter graph this input stream participates in */
        for (i = 0; i < nb_filtergraphs; i++)
            if (ist_in_filtergraph(filtergraphs[i], ist)) {
                FilterGraph *fg = filtergraphs[i];
                if (configure_filtergraph(fg) < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
                    exit_program(1);
                }
            }
    }

    /* choose a pts source and its time base: frame pts, packet pts, or the
       stream's running dts as a last resort */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    for (i = 0; i < ist->nb_filters; i++) {
        /* all but the last filter get a reference; the last consumes the frame */
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            err = av_frame_ref(f, decoded_frame);
            if (err < 0)
                break;
        } else
            f = decoded_frame;
        err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0)
            break;
    }
    decoded_frame->pts = AV_NOPTS_VALUE;

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2176 
2177 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2178 {
2179  AVFrame *decoded_frame, *f;
2180  int i, ret = 0, err = 0, resample_changed;
2181  int64_t best_effort_timestamp;
2182  int64_t dts = AV_NOPTS_VALUE;
2183  AVRational *frame_sample_aspect;
2184  AVPacket avpkt;
2185 
2186  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2187  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2188  // skip the packet.
2189  if (!eof && pkt && pkt->size == 0)
2190  return 0;
2191 
2192  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2193  return AVERROR(ENOMEM);
2194  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2195  return AVERROR(ENOMEM);
2196  decoded_frame = ist->decoded_frame;
2197  if (ist->dts != AV_NOPTS_VALUE)
2198  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2199  if (pkt) {
2200  avpkt = *pkt;
2201  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2202  }
2203 
2204  // The old code used to set dts on the drain packet, which does not work
2205  // with the new API anymore.
2206  if (eof) {
2207  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2208  if (!new)
2209  return AVERROR(ENOMEM);
2210  ist->dts_buffer = new;
2211  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2212  }
2213 
2215  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2216  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2217 
2218  // The following line may be required in some cases where there is no parser
2219  // or the parser does not has_b_frames correctly
2220  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2221  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2222  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2223  } else
2225  "video_delay is larger in decoder than demuxer %d > %d.\n"
2226  "If you want to help, upload a sample "
2227  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2228  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2229  ist->dec_ctx->has_b_frames,
2230  ist->st->codecpar->video_delay);
2231  }
2232 
2233  if (ret != AVERROR_EOF)
2234  check_decode_result(ist, got_output, ret);
2235 
2236  if (*got_output && ret >= 0) {
2237  if (ist->dec_ctx->width != decoded_frame->width ||
2238  ist->dec_ctx->height != decoded_frame->height ||
2239  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2240  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2241  decoded_frame->width,
2242  decoded_frame->height,
2243  decoded_frame->format,
2244  ist->dec_ctx->width,
2245  ist->dec_ctx->height,
2246  ist->dec_ctx->pix_fmt);
2247  }
2248  }
2249 
2250  if (!*got_output || ret < 0)
2251  return ret;
2252 
2253  if(ist->top_field_first>=0)
2254  decoded_frame->top_field_first = ist->top_field_first;
2255 
2256  ist->frames_decoded++;
2257 
2258  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2259  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2260  if (err < 0)
2261  goto fail;
2262  }
2263  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2264 
2265  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2266 
2267  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2268  best_effort_timestamp = ist->dts_buffer[0];
2269 
2270  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2271  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2272  ist->nb_dts_buffer--;
2273  }
2274 
2275  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2276  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2277 
2278  if (ts != AV_NOPTS_VALUE)
2279  ist->next_pts = ist->pts = ts;
2280  }
2281 
2282  if (debug_ts) {
2283  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2284  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2285  ist->st->index, av_ts2str(decoded_frame->pts),
2286  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2287  best_effort_timestamp,
2288  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2289  decoded_frame->key_frame, decoded_frame->pict_type,
2290  ist->st->time_base.num, ist->st->time_base.den);
2291  }
2292 
2293  if (ist->st->sample_aspect_ratio.num)
2294  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2295 
2296  resample_changed = ist->resample_width != decoded_frame->width ||
2297  ist->resample_height != decoded_frame->height ||
2298  ist->resample_pix_fmt != decoded_frame->format;
2299  if (resample_changed) {
2300  av_log(NULL, AV_LOG_INFO,
2301  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2302  ist->file_index, ist->st->index,
2304  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2305 
2306  ist->resample_width = decoded_frame->width;
2307  ist->resample_height = decoded_frame->height;
2308  ist->resample_pix_fmt = decoded_frame->format;
2309 
2310  for (i = 0; i < nb_filtergraphs; i++) {
2311  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2312  configure_filtergraph(filtergraphs[i]) < 0) {
2313  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2314  exit_program(1);
2315  }
2316  }
2317  }
2318 
2319  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2320  for (i = 0; i < ist->nb_filters; i++) {
2321  if (!frame_sample_aspect->num)
2322  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2323 
2324  if (i < ist->nb_filters - 1) {
2325  f = ist->filter_frame;
2326  err = av_frame_ref(f, decoded_frame);
2327  if (err < 0)
2328  break;
2329  } else
2330  f = decoded_frame;
2332  if (err == AVERROR_EOF) {
2333  err = 0; /* ignore */
2334  } else if (err < 0) {
2335  av_log(NULL, AV_LOG_FATAL,
2336  "Failed to inject frame into filter network: %s\n", av_err2str(err));
2337  exit_program(1);
2338  }
2339  }
2340 
2341 fail:
2343  av_frame_unref(decoded_frame);
2344  return err < 0 ? err : ret;
2345 }
2346 
2347 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2348 {
2349  AVSubtitle subtitle;
2350  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2351  &subtitle, got_output, pkt);
2352 
2353  check_decode_result(NULL, got_output, ret);
2354 
2355  if (ret < 0 || !*got_output) {
2356  if (!pkt->size)
2357  sub2video_flush(ist);
2358  return ret;
2359  }
2360 
2361  if (ist->fix_sub_duration) {
2362  int end = 1;
2363  if (ist->prev_sub.got_output) {
2364  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2365  1000, AV_TIME_BASE);
2366  if (end < ist->prev_sub.subtitle.end_display_time) {
2367  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2368  "Subtitle duration reduced from %d to %d%s\n",
2370  end <= 0 ? ", dropping it" : "");
2372  }
2373  }
2374  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2375  FFSWAP(int, ret, ist->prev_sub.ret);
2376  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2377  if (end <= 0)
2378  goto out;
2379  }
2380 
2381  if (!*got_output)
2382  return ret;
2383 
2384  sub2video_update(ist, &subtitle);
2385 
2386  if (!subtitle.num_rects)
2387  goto out;
2388 
2389  ist->frames_decoded++;
2390 
2391  for (i = 0; i < nb_output_streams; i++) {
2392  OutputStream *ost = output_streams[i];
2393 
2394  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2395  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2396  continue;
2397 
2398  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2399  }
2400 
2401 out:
2402  avsubtitle_free(&subtitle);
2403  return ret;
2404 }
2405 
2407 {
2408  int i, ret;
2409  for (i = 0; i < ist->nb_filters; i++) {
2410  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2411  if (ret < 0)
2412  return ret;
2413  }
2414  return 0;
2415 }
2416 
2417 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2418 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2419 {
2420  int ret = 0, i;
2421  int repeating = 0;
2422  int eof_reached = 0;
2423 
2424  AVPacket avpkt;
2425  if (!ist->saw_first_ts) {
2426  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2427  ist->pts = 0;
2428  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2429  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2430  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2431  }
2432  ist->saw_first_ts = 1;
2433  }
2434 
2435  if (ist->next_dts == AV_NOPTS_VALUE)
2436  ist->next_dts = ist->dts;
2437  if (ist->next_pts == AV_NOPTS_VALUE)
2438  ist->next_pts = ist->pts;
2439 
2440  if (!pkt) {
2441  /* EOF handling */
2442  av_init_packet(&avpkt);
2443  avpkt.data = NULL;
2444  avpkt.size = 0;
2445  } else {
2446  avpkt = *pkt;
2447  }
2448 
2449  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2450  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2451  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2452  ist->next_pts = ist->pts = ist->dts;
2453  }
2454 
2455  // while we have more to decode or while the decoder did output something on EOF
2456  while (ist->decoding_needed) {
2457  int duration = 0;
2458  int got_output = 0;
2459 
2460  ist->pts = ist->next_pts;
2461  ist->dts = ist->next_dts;
2462 
2463  switch (ist->dec_ctx->codec_type) {
2464  case AVMEDIA_TYPE_AUDIO:
2465  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2466  break;
2467  case AVMEDIA_TYPE_VIDEO:
2468  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2469  if (!repeating || !pkt || got_output) {
2470  if (pkt && pkt->duration) {
2471  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2472  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2474  duration = ((int64_t)AV_TIME_BASE *
2475  ist->dec_ctx->framerate.den * ticks) /
2477  }
2478 
2479  if(ist->dts != AV_NOPTS_VALUE && duration) {
2480  ist->next_dts += duration;
2481  }else
2482  ist->next_dts = AV_NOPTS_VALUE;
2483  }
2484 
2485  if (got_output)
2486  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2487  break;
2488  case AVMEDIA_TYPE_SUBTITLE:
2489  if (repeating)
2490  break;
2491  ret = transcode_subtitles(ist, &avpkt, &got_output);
2492  if (!pkt && ret >= 0)
2493  ret = AVERROR_EOF;
2494  break;
2495  default:
2496  return -1;
2497  }
2498 
2499  if (ret == AVERROR_EOF) {
2500  eof_reached = 1;
2501  break;
2502  }
2503 
2504  if (ret < 0) {
2505  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2506  ist->file_index, ist->st->index, av_err2str(ret));
2507  if (exit_on_error)
2508  exit_program(1);
2509  // Decoding might not terminate if we're draining the decoder, and
2510  // the decoder keeps returning an error.
2511  // This should probably be considered a libavcodec issue.
2512  // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2513  if (!pkt)
2514  eof_reached = 1;
2515  break;
2516  }
2517 
2518  if (!got_output)
2519  break;
2520 
2521  // During draining, we might get multiple output frames in this loop.
2522  // ffmpeg.c does not drain the filter chain on configuration changes,
2523  // which means if we send multiple frames at once to the filters, and
2524  // one of those frames changes configuration, the buffered frames will
2525  // be lost. This can upset certain FATE tests.
2526  // Decode only 1 frame per call on EOF to appease these FATE tests.
2527  // The ideal solution would be to rewrite decoding to use the new
2528  // decoding API in a better way.
2529  if (!pkt)
2530  break;
2531 
2532  repeating = 1;
2533  }
2534 
2535  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2536  /* except when looping we need to flush but not to send an EOF */
2537  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2538  int ret = send_filter_eof(ist);
2539  if (ret < 0) {
2540  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2541  exit_program(1);
2542  }
2543  }
2544 
2545  /* handle stream copy */
2546  if (!ist->decoding_needed) {
2547  ist->dts = ist->next_dts;
2548  switch (ist->dec_ctx->codec_type) {
2549  case AVMEDIA_TYPE_AUDIO:
2550  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2551  ist->dec_ctx->sample_rate;
2552  break;
2553  case AVMEDIA_TYPE_VIDEO:
2554  if (ist->framerate.num) {
2555  // TODO: Remove work-around for c99-to-c89 issue 7
2556  AVRational time_base_q = AV_TIME_BASE_Q;
2557  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2558  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2559  } else if (pkt->duration) {
2560  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2561  } else if(ist->dec_ctx->framerate.num != 0) {
2562  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2563  ist->next_dts += ((int64_t)AV_TIME_BASE *
2564  ist->dec_ctx->framerate.den * ticks) /
2566  }
2567  break;
2568  }
2569  ist->pts = ist->dts;
2570  ist->next_pts = ist->next_dts;
2571  }
2572  for (i = 0; pkt && i < nb_output_streams; i++) {
2573  OutputStream *ost = output_streams[i];
2574 
2575  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2576  continue;
2577 
2578  do_streamcopy(ist, ost, pkt);
2579  }
2580 
2581  return !eof_reached;
2582 }
2583 
2584 static void print_sdp(void)
2585 {
2586  char sdp[16384];
2587  int i;
2588  int j;
2589  AVIOContext *sdp_pb;
2590  AVFormatContext **avc;
2591 
2592  for (i = 0; i < nb_output_files; i++) {
2593  if (!output_files[i]->header_written)
2594  return;
2595  }
2596 
2597  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2598  if (!avc)
2599  exit_program(1);
2600  for (i = 0, j = 0; i < nb_output_files; i++) {
2601  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2602  avc[j] = output_files[i]->ctx;
2603  j++;
2604  }
2605  }
2606 
2607  if (!j)
2608  goto fail;
2609 
2610  av_sdp_create(avc, j, sdp, sizeof(sdp));
2611 
2612  if (!sdp_filename) {
2613  printf("SDP:\n%s\n", sdp);
2614  fflush(stdout);
2615  } else {
2616  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2617  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2618  } else {
2619  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2620  avio_closep(&sdp_pb);
2622  }
2623  }
2624 
2625 fail:
2626  av_freep(&avc);
2627 }
2628 
2630 {
2631  int i;
2632  for (i = 0; hwaccels[i].name; i++)
2633  if (hwaccels[i].pix_fmt == pix_fmt)
2634  return &hwaccels[i];
2635  return NULL;
2636 }
2637 
2639 {
2640  InputStream *ist = s->opaque;
2641  const enum AVPixelFormat *p;
2642  int ret;
2643 
2644  for (p = pix_fmts; *p != -1; p++) {
2646  const HWAccel *hwaccel;
2647 
2648  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2649  break;
2650 
2651  hwaccel = get_hwaccel(*p);
2652  if (!hwaccel ||
2653  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2654  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2655  continue;
2656 
2657  ret = hwaccel->init(s);
2658  if (ret < 0) {
2659  if (ist->hwaccel_id == hwaccel->id) {
2661  "%s hwaccel requested for input stream #%d:%d, "
2662  "but cannot be initialized.\n", hwaccel->name,
2663  ist->file_index, ist->st->index);
2664  return AV_PIX_FMT_NONE;
2665  }
2666  continue;
2667  }
2668 
2669  if (ist->hw_frames_ctx) {
2671  if (!s->hw_frames_ctx)
2672  return AV_PIX_FMT_NONE;
2673  }
2674 
2675  ist->active_hwaccel_id = hwaccel->id;
2676  ist->hwaccel_pix_fmt = *p;
2677  break;
2678  }
2679 
2680  return *p;
2681 }
2682 
2684 {
2685  InputStream *ist = s->opaque;
2686 
2687  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2688  return ist->hwaccel_get_buffer(s, frame, flags);
2689 
2690  return avcodec_default_get_buffer2(s, frame, flags);
2691 }
2692 
2693 static int init_input_stream(int ist_index, char *error, int error_len)
2694 {
2695  int ret;
2696  InputStream *ist = input_streams[ist_index];
2697 
2698  if (ist->decoding_needed) {
2699  AVCodec *codec = ist->dec;
2700  if (!codec) {
2701  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2702  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2703  return AVERROR(EINVAL);
2704  }
2705 
2706  ist->dec_ctx->opaque = ist;
2707  ist->dec_ctx->get_format = get_format;
2708  ist->dec_ctx->get_buffer2 = get_buffer;
2709  ist->dec_ctx->thread_safe_callbacks = 1;
2710 
2711  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2712  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2713  (ist->decoding_needed & DECODING_FOR_OST)) {
2714  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2716  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2717  }
2718 
2719  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2720 
2721  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2722  * audio, and video decoders such as cuvid or mediacodec */
2724 
2725  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2726  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2727  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2728  if (ret == AVERROR_EXPERIMENTAL)
2729  abort_codec_experimental(codec, 0);
2730 
2731  snprintf(error, error_len,
2732  "Error while opening decoder for input stream "
2733  "#%d:%d : %s",
2734  ist->file_index, ist->st->index, av_err2str(ret));
2735  return ret;
2736  }
2738  }
2739 
2740  ist->next_pts = AV_NOPTS_VALUE;
2741  ist->next_dts = AV_NOPTS_VALUE;
2742 
2743  return 0;
2744 }
2745 
2747 {
2748  if (ost->source_index >= 0)
2749  return input_streams[ost->source_index];
2750  return NULL;
2751 }
2752 
/* qsort comparator for int64_t values: negative/zero/positive for a<b, a==b, a>b. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    /* branch-free sign of (lhs - rhs) without overflow */
    return (lhs > rhs) - (lhs < rhs);
}
2757 
2758 /* open the muxer when all the streams are initialized */
2759 static int check_init_output_file(OutputFile *of, int file_index)
2760 {
2761  int ret, i;
2762 
2763  for (i = 0; i < of->ctx->nb_streams; i++) {
2764  OutputStream *ost = output_streams[of->ost_index + i];
2765  if (!ost->initialized)
2766  return 0;
2767  }
2768 
2769  of->ctx->interrupt_callback = int_cb;
2770 
2771  ret = avformat_write_header(of->ctx, &of->opts);
2772  if (ret < 0) {
2774  "Could not write header for output file #%d "
2775  "(incorrect codec parameters ?): %s",
2776  file_index, av_err2str(ret));
2777  return ret;
2778  }
2779  //assert_avoptions(of->opts);
2780  of->header_written = 1;
2781 
2782  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2783 
2784  if (sdp_filename || want_sdp)
2785  print_sdp();
2786 
2787  /* flush the muxing queues */
2788  for (i = 0; i < of->ctx->nb_streams; i++) {
2789  OutputStream *ost = output_streams[of->ost_index + i];
2790 
2791  while (av_fifo_size(ost->muxing_queue)) {
2792  AVPacket pkt;
2793  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2794  write_packet(of, &pkt, ost);
2795  }
2796  }
2797 
2798  return 0;
2799 }
2800 
2802 {
2803  AVBSFContext *ctx;
2804  int i, ret;
2805 
2806  if (!ost->nb_bitstream_filters)
2807  return 0;
2808 
2809  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2810  ctx = ost->bsf_ctx[i];
2811 
2812  ret = avcodec_parameters_copy(ctx->par_in,
2813  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2814  if (ret < 0)
2815  return ret;
2816 
2817  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2818 
2819  ret = av_bsf_init(ctx);
2820  if (ret < 0) {
2821  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2822  ost->bsf_ctx[i]->filter->name);
2823  return ret;
2824  }
2825  }
2826 
2827  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2828  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2829  if (ret < 0)
2830  return ret;
2831 
2832  ost->st->time_base = ctx->time_base_out;
2833 
2834  return 0;
2835 }
2836 
2838 {
2839  OutputFile *of = output_files[ost->file_index];
2840  InputStream *ist = get_input_stream(ost);
2841  AVCodecParameters *par_dst = ost->st->codecpar;
2842  AVCodecParameters *par_src = ost->ref_par;
2843  AVRational sar;
2844  int i, ret;
2845  uint64_t extra_size;
2846 
2847  av_assert0(ist && !ost->filter);
2848 
2850  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2851  if (ret < 0) {
2853  "Error setting up codec context options.\n");
2854  return ret;
2855  }
2857 
2858  extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2859 
2860  if (extra_size > INT_MAX) {
2861  return AVERROR(EINVAL);
2862  }
2863 
2864  /* if stream_copy is selected, no need to decode or encode */
2865  par_dst->codec_id = par_src->codec_id;
2866  par_dst->codec_type = par_src->codec_type;
2867 
2868  if (!par_dst->codec_tag) {
2869  unsigned int codec_tag;
2870  if (!of->ctx->oformat->codec_tag ||
2871  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2872  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2873  par_dst->codec_tag = par_src->codec_tag;
2874  }
2875 
2876  par_dst->bit_rate = par_src->bit_rate;
2877  par_dst->field_order = par_src->field_order;
2878  par_dst->chroma_location = par_src->chroma_location;
2879 
2880  if (par_src->extradata_size) {
2881  par_dst->extradata = av_mallocz(extra_size);
2882  if (!par_dst->extradata) {
2883  return AVERROR(ENOMEM);
2884  }
2885  memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2886  par_dst->extradata_size = par_src->extradata_size;
2887  }
2888  par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2889  par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2890 
2891  if (!ost->frame_rate.num)
2892  ost->frame_rate = ist->framerate;
2893  ost->st->avg_frame_rate = ost->frame_rate;
2894 
2896  if (ret < 0)
2897  return ret;
2898 
2899  // copy timebase while removing common factors
2901 
2902  if (ist->st->nb_side_data) {
2903  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2904  sizeof(*ist->st->side_data));
2905  if (!ost->st->side_data)
2906  return AVERROR(ENOMEM);
2907 
2908  ost->st->nb_side_data = 0;
2909  for (i = 0; i < ist->st->nb_side_data; i++) {
2910  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2911  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2912 
2913  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2914  continue;
2915 
2916  sd_dst->data = av_malloc(sd_src->size);
2917  if (!sd_dst->data)
2918  return AVERROR(ENOMEM);
2919  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2920  sd_dst->size = sd_src->size;
2921  sd_dst->type = sd_src->type;
2922  ost->st->nb_side_data++;
2923  }
2924  }
2925 
2926  ost->parser = av_parser_init(par_dst->codec_id);
2927  ost->parser_avctx = avcodec_alloc_context3(NULL);
2928  if (!ost->parser_avctx)
2929  return AVERROR(ENOMEM);
2930 
2931  switch (par_dst->codec_type) {
2932  case AVMEDIA_TYPE_AUDIO:
2933  if (audio_volume != 256) {
2934  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2935  exit_program(1);
2936  }
2937  par_dst->channel_layout = par_src->channel_layout;
2938  par_dst->sample_rate = par_src->sample_rate;
2939  par_dst->channels = par_src->channels;
2940  par_dst->frame_size = par_src->frame_size;
2941  par_dst->block_align = par_src->block_align;
2942  par_dst->initial_padding = par_src->initial_padding;
2943  par_dst->trailing_padding = par_src->trailing_padding;
2944  par_dst->profile = par_src->profile;
2945  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2946  par_dst->block_align= 0;
2947  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2948  par_dst->block_align= 0;
2949  break;
2950  case AVMEDIA_TYPE_VIDEO:
2951  par_dst->format = par_src->format;
2952  par_dst->color_space = par_src->color_space;
2953  par_dst->color_range = par_src->color_range;
2954  par_dst->color_primaries = par_src->color_primaries;
2955  par_dst->color_trc = par_src->color_trc;
2956  par_dst->width = par_src->width;
2957  par_dst->height = par_src->height;
2958  par_dst->video_delay = par_src->video_delay;
2959  par_dst->profile = par_src->profile;
2960  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2961  sar =
2962  av_mul_q(ost->frame_aspect_ratio,
2963  (AVRational){ par_dst->height, par_dst->width });
2964  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2965  "with stream copy may produce invalid files\n");
2966  }
2967  else if (ist->st->sample_aspect_ratio.num)
2968  sar = ist->st->sample_aspect_ratio;
2969  else
2970  sar = par_src->sample_aspect_ratio;
2971  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2972  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2973  ost->st->r_frame_rate = ist->st->r_frame_rate;
2974  break;
2975  case AVMEDIA_TYPE_SUBTITLE:
2976  par_dst->width = par_src->width;
2977  par_dst->height = par_src->height;
2978  break;
2979  case AVMEDIA_TYPE_UNKNOWN:
2980  case AVMEDIA_TYPE_DATA:
2982  break;
2983  default:
2984  abort();
2985  }
2986 
2987  return 0;
2988 }
2989 
2990 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2991 {
2992  int ret = 0;
2993 
2994  if (ost->encoding_needed) {
2995  AVCodec *codec = ost->enc;
2996  AVCodecContext *dec = NULL;
2997  InputStream *ist;
2998 
2999  if ((ist = get_input_stream(ost)))
3000  dec = ist->dec_ctx;
3001  if (dec && dec->subtitle_header) {
3002  /* ASS code assumes this buffer is null terminated so add extra byte. */
3004  if (!ost->enc_ctx->subtitle_header)
3005  return AVERROR(ENOMEM);
3006  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3008  }
3009  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3010  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3011  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3012  !codec->defaults &&
3013  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3014  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3015  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3016 
3017  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3019  if (!ost->enc_ctx->hw_frames_ctx)
3020  return AVERROR(ENOMEM);
3021  }
3022 
3023  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3024  if (ret == AVERROR_EXPERIMENTAL)
3025  abort_codec_experimental(codec, 1);
3026  snprintf(error, error_len,
3027  "Error while opening encoder for output stream #%d:%d - "
3028  "maybe incorrect parameters such as bit_rate, rate, width or height",
3029  ost->file_index, ost->index);
3030  return ret;
3031  }
3032  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3033  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3035  ost->enc_ctx->frame_size);
3037  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3038  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3039  " It takes bits/s as argument, not kbits/s\n");
3040 
3042  if (ret < 0) {
3044  "Error initializing the output stream codec context.\n");
3045  exit_program(1);
3046  }
3047  /*
3048  * FIXME: ost->st->codec should't be needed here anymore.
3049  */
3050  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3051  if (ret < 0)
3052  return ret;
3053 
3054  if (ost->enc_ctx->nb_coded_side_data) {
3055  int i;
3056 
3058  sizeof(*ost->st->side_data));
3059  if (!ost->st->side_data)
3060  return AVERROR(ENOMEM);
3061 
3062  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3063  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3064  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3065 
3066  sd_dst->data = av_malloc(sd_src->size);
3067  if (!sd_dst->data)
3068  return AVERROR(ENOMEM);
3069  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3070  sd_dst->size = sd_src->size;
3071  sd_dst->type = sd_src->type;
3072  ost->st->nb_side_data++;
3073  }
3074  }
3075 
3076  // copy timebase while removing common factors
3077  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3078  ost->st->codec->codec= ost->enc_ctx->codec;
3079  } else if (ost->stream_copy) {
3080  ret = init_output_stream_streamcopy(ost);
3081  if (ret < 0)
3082  return ret;
3083 
3084  /*
3085  * FIXME: will the codec context used by the parser during streamcopy
3086  * This should go away with the new parser API.
3087  */
3088  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3089  if (ret < 0)
3090  return ret;
3091  }
3092 
3093  /* initialize bitstream filters for the output stream
3094  * needs to be done here, because the codec id for streamcopy is not
3095  * known until now */
3096  ret = init_output_bsfs(ost);
3097  if (ret < 0)
3098  return ret;
3099 
3100  ost->initialized = 1;
3101 
3102  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3103  if (ret < 0)
3104  return ret;
3105 
3106  return ret;
3107 }
3108 
3109 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3110  AVCodecContext *avctx)
3111 {
3112  char *p;
3113  int n = 1, i, size, index = 0;
3114  int64_t t, *pts;
3115 
3116  for (p = kf; *p; p++)
3117  if (*p == ',')
3118  n++;
3119  size = n;
3120  pts = av_malloc_array(size, sizeof(*pts));
3121  if (!pts) {
3122  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3123  exit_program(1);
3124  }
3125 
3126  p = kf;
3127  for (i = 0; i < n; i++) {
3128  char *next = strchr(p, ',');
3129 
3130  if (next)
3131  *next++ = 0;
3132 
3133  if (!memcmp(p, "chapters", 8)) {
3134 
3135  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3136  int j;
3137 
3138  if (avf->nb_chapters > INT_MAX - size ||
3139  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3140  sizeof(*pts)))) {
3142  "Could not allocate forced key frames array.\n");
3143  exit_program(1);
3144  }
3145  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3146  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3147 
3148  for (j = 0; j < avf->nb_chapters; j++) {
3149  AVChapter *c = avf->chapters[j];
3150  av_assert1(index < size);
3151  pts[index++] = av_rescale_q(c->start, c->time_base,
3152  avctx->time_base) + t;
3153  }
3154 
3155  } else {
3156 
3157  t = parse_time_or_die("force_key_frames", p, 1);
3158  av_assert1(index < size);
3159  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3160 
3161  }
3162 
3163  p = next;
3164  }
3165 
3166  av_assert0(index == size);
3167  qsort(pts, size, sizeof(*pts), compare_int64);
3168  ost->forced_kf_count = size;
3169  ost->forced_kf_pts = pts;
3170 }
3171 
3172 static void report_new_stream(int input_index, AVPacket *pkt)
3173 {
3174  InputFile *file = input_files[input_index];
3175  AVStream *st = file->ctx->streams[pkt->stream_index];
3176 
3177  if (pkt->stream_index < file->nb_streams_warn)
3178  return;
3179  av_log(file->ctx, AV_LOG_WARNING,
3180  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3182  input_index, pkt->stream_index,
3183  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3184  file->nb_streams_warn = pkt->stream_index + 1;
3185 }
3186 
3188 {
3189  AVDictionaryEntry *e;
3190 
3191  uint8_t *encoder_string;
3192  int encoder_string_len;
3193  int format_flags = 0;
3194  int codec_flags = 0;
3195 
3196  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3197  return;
3198 
3199  e = av_dict_get(of->opts, "fflags", NULL, 0);
3200  if (e) {
3201  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3202  if (!o)
3203  return;
3204  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3205  }
3206  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3207  if (e) {
3208  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3209  if (!o)
3210  return;
3211  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3212  }
3213 
3214  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3215  encoder_string = av_mallocz(encoder_string_len);
3216  if (!encoder_string)
3217  exit_program(1);
3218 
3219  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3220  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3221  else
3222  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3223  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3224  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3226 }
3227 
3228 static int transcode_init(void)
3229 {
3230  int ret = 0, i, j, k;
3231  AVFormatContext *oc;
3232  OutputStream *ost;
3233  InputStream *ist;
3234  char error[1024] = {0};
3235 
3236  for (i = 0; i < nb_filtergraphs; i++) {
3237  FilterGraph *fg = filtergraphs[i];
3238  for (j = 0; j < fg->nb_outputs; j++) {
3239  OutputFilter *ofilter = fg->outputs[j];
3240  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3241  continue;
3242  if (fg->nb_inputs != 1)
3243  continue;
3244  for (k = nb_input_streams-1; k >= 0 ; k--)
3245  if (fg->inputs[0]->ist == input_streams[k])
3246  break;
3247  ofilter->ost->source_index = k;
3248  }
3249  }
3250 
3251  /* init framerate emulation */
3252  for (i = 0; i < nb_input_files; i++) {
3253  InputFile *ifile = input_files[i];
3254  if (ifile->rate_emu)
3255  for (j = 0; j < ifile->nb_streams; j++)
3256  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3257  }
3258 
3259  /* for each output stream, we compute the right encoding parameters */
3260  for (i = 0; i < nb_output_streams; i++) {
3261  ost = output_streams[i];
3262  oc = output_files[ost->file_index]->ctx;
3263  ist = get_input_stream(ost);
3264 
3265  if (ost->attachment_filename)
3266  continue;
3267 
3268  if (ist) {
3269  ost->st->disposition = ist->st->disposition;
3270  } else {
3271  for (j=0; j<oc->nb_streams; j++) {
3272  AVStream *st = oc->streams[j];
3273  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3274  break;
3275  }
3276  if (j == oc->nb_streams)
3277  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3280  }
3281 
3282  if (!ost->stream_copy) {
3283  AVCodecContext *enc_ctx = ost->enc_ctx;
3285 
3286  set_encoder_id(output_files[ost->file_index], ost);
3287 
3288  if (ist) {
3289  dec_ctx = ist->dec_ctx;
3290 
3291  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3292  }
3293 
3294 #if CONFIG_LIBMFX
3295  if (qsv_transcode_init(ost))
3296  exit_program(1);
3297 #endif
3298 
3299 #if CONFIG_CUVID
3300  if (cuvid_transcode_init(ost))
3301  exit_program(1);
3302 #endif
3303 
3304  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3305  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3307  FilterGraph *fg = ost->filter->graph;
3308  if (configure_filtergraph(fg)) {
3309  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3310  exit_program(1);
3311  }
3312  }
3313 
3314  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3315  if (!ost->frame_rate.num)
3317  if (ist && !ost->frame_rate.num)
3318  ost->frame_rate = ist->framerate;
3319  if (ist && !ost->frame_rate.num)
3320  ost->frame_rate = ist->st->r_frame_rate;
3321  if (ist && !ost->frame_rate.num) {
3322  ost->frame_rate = (AVRational){25, 1};
3324  "No information "
3325  "about the input framerate is available. Falling "
3326  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3327  "if you want a different framerate.\n",
3328  ost->file_index, ost->index);
3329  }
3330 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3331  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3332  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3333  ost->frame_rate = ost->enc->supported_framerates[idx];
3334  }
3335  // reduce frame rate for mpeg4 to be within the spec limits
3336  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3337  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3338  ost->frame_rate.num, ost->frame_rate.den, 65535);
3339  }
3340  }
3341 
3342  switch (enc_ctx->codec_type) {
3343  case AVMEDIA_TYPE_AUDIO:
3344  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3345  if (dec_ctx)
3346  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3347  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3348  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3349  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3350  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3351  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3352  break;
3353  case AVMEDIA_TYPE_VIDEO:
3354  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3355  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3356  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3357  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3359  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3360  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3361  }
3362  for (j = 0; j < ost->forced_kf_count; j++)
3363  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3365  enc_ctx->time_base);
3366 
3367  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3368  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3369  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3370  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3371  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3373  if (!strncmp(ost->enc->name, "libx264", 7) &&
3374  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3377  "No pixel format specified, %s for H.264 encoding chosen.\n"
3378  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3380  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3381  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3384  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3385  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3387  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3388  if (dec_ctx)
3389  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3390  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3391 
3392  ost->st->avg_frame_rate = ost->frame_rate;
3393 
3394  if (!dec_ctx ||
3395  enc_ctx->width != dec_ctx->width ||
3396  enc_ctx->height != dec_ctx->height ||
3397  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3399  }
3400 
3401  if (ost->forced_keyframes) {
3402  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3405  if (ret < 0) {
3407  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3408  return ret;
3409  }
3414 
3415  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416  // parse it only for static kf timings
3417  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3419  }
3420  }
3421  break;
3422  case AVMEDIA_TYPE_SUBTITLE:
3423  enc_ctx->time_base = (AVRational){1, 1000};
3424  if (!enc_ctx->width) {
3425  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3427  }
3428  break;
3429  case AVMEDIA_TYPE_DATA:
3430  break;
3431  default:
3432  abort();
3433  break;
3434  }
3435  }
3436 
3437  if (ost->disposition) {
3438  static const AVOption opts[] = {
3439  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3440  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3441  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3442  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3443  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3444  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3445  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3446  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3447  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3448  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3449  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3450  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3451  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3452  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3453  { NULL },
3454  };
3455  static const AVClass class = {
3456  .class_name = "",
3457  .item_name = av_default_item_name,
3458  .option = opts,
3459  .version = LIBAVUTIL_VERSION_INT,
3460  };
3461  const AVClass *pclass = &class;
3462 
3463  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3464  if (ret < 0)
3465  goto dump_format;
3466  }
3467  }
3468 
3469  /* init input streams */
3470  for (i = 0; i < nb_input_streams; i++)
3471  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3472  for (i = 0; i < nb_output_streams; i++) {
3473  ost = output_streams[i];
3474  avcodec_close(ost->enc_ctx);
3475  }
3476  goto dump_format;
3477  }
3478 
3479  /* open each encoder */
3480  for (i = 0; i < nb_output_streams; i++) {
3481  ret = init_output_stream(output_streams[i], error, sizeof(error));
3482  if (ret < 0)
3483  goto dump_format;
3484  }
3485 
3486  /* discard unused programs */
3487  for (i = 0; i < nb_input_files; i++) {
3488  InputFile *ifile = input_files[i];
3489  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3490  AVProgram *p = ifile->ctx->programs[j];
3491  int discard = AVDISCARD_ALL;
3492 
3493  for (k = 0; k < p->nb_stream_indexes; k++)
3494  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3495  discard = AVDISCARD_DEFAULT;
3496  break;
3497  }
3498  p->discard = discard;
3499  }
3500  }
3501 
3502  /* write headers for files with no streams */
3503  for (i = 0; i < nb_output_files; i++) {
3504  oc = output_files[i]->ctx;
3505  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3506  ret = check_init_output_file(output_files[i], i);
3507  if (ret < 0)
3508  goto dump_format;
3509  }
3510  }
3511 
3512  dump_format:
3513  /* dump the stream mapping */
3514  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3515  for (i = 0; i < nb_input_streams; i++) {
3516  ist = input_streams[i];
3517 
3518  for (j = 0; j < ist->nb_filters; j++) {
3519  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3520  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3521  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3522  ist->filters[j]->name);
3523  if (nb_filtergraphs > 1)
3524  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3525  av_log(NULL, AV_LOG_INFO, "\n");
3526  }
3527  }
3528  }
3529 
3530  for (i = 0; i < nb_output_streams; i++) {
3531  ost = output_streams[i];
3532 
3533  if (ost->attachment_filename) {
3534  /* an attached file */
3535  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3536  ost->attachment_filename, ost->file_index, ost->index);
3537  continue;
3538  }
3539 
3540  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3541  /* output from a complex graph */
3542  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3543  if (nb_filtergraphs > 1)
3544  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3545 
3546  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3547  ost->index, ost->enc ? ost->enc->name : "?");
3548  continue;
3549  }
3550 
3551  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3552  input_streams[ost->source_index]->file_index,
3553  input_streams[ost->source_index]->st->index,
3554  ost->file_index,
3555  ost->index);
3556  if (ost->sync_ist != input_streams[ost->source_index])
3557  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3558  ost->sync_ist->file_index,
3559  ost->sync_ist->st->index);
3560  if (ost->stream_copy)
3561  av_log(NULL, AV_LOG_INFO, " (copy)");
3562  else {
3563  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3564  const AVCodec *out_codec = ost->enc;
3565  const char *decoder_name = "?";
3566  const char *in_codec_name = "?";
3567  const char *encoder_name = "?";
3568  const char *out_codec_name = "?";
3569  const AVCodecDescriptor *desc;
3570 
3571  if (in_codec) {
3572  decoder_name = in_codec->name;
3573  desc = avcodec_descriptor_get(in_codec->id);
3574  if (desc)
3575  in_codec_name = desc->name;
3576  if (!strcmp(decoder_name, in_codec_name))
3577  decoder_name = "native";
3578  }
3579 
3580  if (out_codec) {
3581  encoder_name = out_codec->name;
3582  desc = avcodec_descriptor_get(out_codec->id);
3583  if (desc)
3584  out_codec_name = desc->name;
3585  if (!strcmp(encoder_name, out_codec_name))
3586  encoder_name = "native";
3587  }
3588 
3589  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3590  in_codec_name, decoder_name,
3591  out_codec_name, encoder_name);
3592  }
3593  av_log(NULL, AV_LOG_INFO, "\n");
3594  }
3595 
3596  if (ret) {
3597  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3598  return ret;
3599  }
3600 
3601  transcode_init_done = 1;
3602 
3603  return 0;
3604 }
3605 
3606 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3607 static int need_output(void)
3608 {
3609  int i;
3610 
3611  for (i = 0; i < nb_output_streams; i++) {
3612  OutputStream *ost = output_streams[i];
3613  OutputFile *of = output_files[ost->file_index];
3614  AVFormatContext *os = output_files[ost->file_index]->ctx;
3615 
3616  if (ost->finished ||
3617  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3618  continue;
3619  if (ost->frame_number >= ost->max_frames) {
3620  int j;
3621  for (j = 0; j < of->ctx->nb_streams; j++)
3622  close_output_stream(output_streams[of->ost_index + j]);
3623  continue;
3624  }
3625 
3626  return 1;
3627  }
3628 
3629  return 0;
3630 }
3631 
3632 /**
3633  * Select the output stream to process.
3634  *
3635  * @return selected output stream, or NULL if none available
3636  */
3638 {
3639  int i;
3640  int64_t opts_min = INT64_MAX;
3641  OutputStream *ost_min = NULL;
3642 
3643  for (i = 0; i < nb_output_streams; i++) {
3644  OutputStream *ost = output_streams[i];
3645  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3646  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3647  AV_TIME_BASE_Q);
3648  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3649  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3650 
3651  if (!ost->finished && opts < opts_min) {
3652  opts_min = opts;
3653  ost_min = ost->unavailable ? NULL : ost;
3654  }
3655  }
3656  return ost_min;
3657 }
3658 
/* Enable (on != 0) or disable terminal echo on stdin; no-op on systems
 * without termios or when stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;
    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3670 
3672 {
3673  int i, ret, key;
3674  static int64_t last_time;
3675  if (received_nb_signals)
3676  return AVERROR_EXIT;
3677  /* read_key() returns 0 on EOF */
3678  if(cur_time - last_time >= 100000 && !run_as_daemon){
3679  key = read_key();
3680  last_time = cur_time;
3681  }else
3682  key = -1;
3683  if (key == 'q')
3684  return AVERROR_EXIT;
3685  if (key == '+') av_log_set_level(av_log_get_level()+10);
3686  if (key == '-') av_log_set_level(av_log_get_level()-10);
3687  if (key == 's') qp_hist ^= 1;
3688  if (key == 'h'){
3689  if (do_hex_dump){
3690  do_hex_dump = do_pkt_dump = 0;
3691  } else if(do_pkt_dump){
3692  do_hex_dump = 1;
3693  } else
3694  do_pkt_dump = 1;
3696  }
3697  if (key == 'c' || key == 'C'){
3698  char buf[4096], target[64], command[256], arg[256] = {0};
3699  double time;
3700  int k, n = 0;
3701  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3702  i = 0;
3703  set_tty_echo(1);
3704  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3705  if (k > 0)
3706  buf[i++] = k;
3707  buf[i] = 0;
3708  set_tty_echo(0);
3709  fprintf(stderr, "\n");
3710  if (k > 0 &&
3711  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3712  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3713  target, time, command, arg);
3714  for (i = 0; i < nb_filtergraphs; i++) {
3715  FilterGraph *fg = filtergraphs[i];
3716  if (fg->graph) {
3717  if (time < 0) {
3718  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3719  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3720  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3721  } else if (key == 'c') {
3722  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3723  ret = AVERROR_PATCHWELCOME;
3724  } else {
3725  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3726  if (ret < 0)
3727  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3728  }
3729  }
3730  }
3731  } else {
3733  "Parse error, at least 3 arguments were expected, "
3734  "only %d given in string '%s'\n", n, buf);
3735  }
3736  }
3737  if (key == 'd' || key == 'D'){
3738  int debug=0;
3739  if(key == 'D') {
3740  debug = input_streams[0]->st->codec->debug<<1;
3741  if(!debug) debug = 1;
3742  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3743  debug += debug;
3744  }else{
3745  char buf[32];
3746  int k = 0;
3747  i = 0;
3748  set_tty_echo(1);
3749  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3750  if (k > 0)
3751  buf[i++] = k;
3752  buf[i] = 0;
3753  set_tty_echo(0);
3754  fprintf(stderr, "\n");
3755  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3756  fprintf(stderr,"error parsing debug value\n");
3757  }
3758  for(i=0;i<nb_input_streams;i++) {
3759  input_streams[i]->st->codec->debug = debug;
3760  }
3761  for(i=0;i<nb_output_streams;i++) {
3762  OutputStream *ost = output_streams[i];
3763  ost->enc_ctx->debug = debug;
3764  }
3765  if(debug) av_log_set_level(AV_LOG_DEBUG);
3766  fprintf(stderr,"debug=%d\n", debug);
3767  }
3768  if (key == '?'){
3769  fprintf(stderr, "key function\n"
3770  "? show this help\n"
3771  "+ increase verbosity\n"
3772  "- decrease verbosity\n"
3773  "c Send command to first matching filter supporting it\n"
3774  "C Send/Queue command to all matching filters\n"
3775  "D cycle through available debug modes\n"
3776  "h dump packets/hex press to cycle through the 3 states\n"
3777  "q quit\n"
3778  "s Show QP histogram\n"
3779  );
3780  }
3781  return 0;
3782 }
3783 
3784 #if HAVE_PTHREADS
3785 static void *input_thread(void *arg)
3786 {
3787  InputFile *f = arg;
3788  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3789  int ret = 0;
3790 
3791  while (1) {
3792  AVPacket pkt;
3793  ret = av_read_frame(f->ctx, &pkt);
3794 
3795  if (ret == AVERROR(EAGAIN)) {
3796  av_usleep(10000);
3797  continue;
3798  }
3799  if (ret < 0) {
3800  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3801  break;
3802  }
3803  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3804  if (flags && ret == AVERROR(EAGAIN)) {
3805  flags = 0;
3806  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3808  "Thread message queue blocking; consider raising the "
3809  "thread_queue_size option (current value: %d)\n",
3810  f->thread_queue_size);
3811  }
3812  if (ret < 0) {
3813  if (ret != AVERROR_EOF)
3814  av_log(f->ctx, AV_LOG_ERROR,
3815  "Unable to send packet to main thread: %s\n",
3816  av_err2str(ret));
3817  av_packet_unref(&pkt);
3818  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3819  break;
3820  }
3821  }
3822 
3823  return NULL;
3824 }
3825 
3826 static void free_input_threads(void)
3827 {
3828  int i;
3829 
3830  for (i = 0; i < nb_input_files; i++) {
3831  InputFile *f = input_files[i];
3832  AVPacket pkt;
3833 
3834  if (!f || !f->in_thread_queue)
3835  continue;
3837  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3838  av_packet_unref(&pkt);
3839 
3840  pthread_join(f->thread, NULL);
3841  f->joined = 1;
3842  av_thread_message_queue_free(&f->in_thread_queue);
3843  }
3844 }
3845 
3846 static int init_input_threads(void)
3847 {
3848  int i, ret;
3849 
3850  if (nb_input_files == 1)
3851  return 0;
3852 
3853  for (i = 0; i < nb_input_files; i++) {
3854  InputFile *f = input_files[i];
3855 
3856  if (f->ctx->pb ? !f->ctx->pb->seekable :
3857  strcmp(f->ctx->iformat->name, "lavfi"))
3858  f->non_blocking = 1;
3859  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3860  f->thread_queue_size, sizeof(AVPacket));
3861  if (ret < 0)
3862  return ret;
3863 
3864  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3865  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3866  av_thread_message_queue_free(&f->in_thread_queue);
3867  return AVERROR(ret);
3868  }
3869  }
3870  return 0;
3871 }
3872 
3873 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3874 {
3875  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3876  f->non_blocking ?
3878 }
3879 #endif
3880 
3882 {
3883  if (f->rate_emu) {
3884  int i;
3885  for (i = 0; i < f->nb_streams; i++) {
3886  InputStream *ist = input_streams[f->ist_index + i];
3887  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3888  int64_t now = av_gettime_relative() - ist->start;
3889  if (pts > now)
3890  return AVERROR(EAGAIN);
3891  }
3892  }
3893 
3894 #if HAVE_PTHREADS
3895  if (nb_input_files > 1)
3896  return get_input_packet_mt(f, pkt);
3897 #endif
3898  return av_read_frame(f->ctx, pkt);
3899 }
3900 
3901 static int got_eagain(void)
3902 {
3903  int i;
3904  for (i = 0; i < nb_output_streams; i++)
3905  if (output_streams[i]->unavailable)
3906  return 1;
3907  return 0;
3908 }
3909 
3910 static void reset_eagain(void)
3911 {
3912  int i;
3913  for (i = 0; i < nb_input_files; i++)
3914  input_files[i]->eagain = 0;
3915  for (i = 0; i < nb_output_streams; i++)
3916  output_streams[i]->unavailable = 0;
3917 }
3918 
3919 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3920 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3921  AVRational time_base)
3922 {
3923  int ret;
3924 
3925  if (!*duration) {
3926  *duration = tmp;
3927  return tmp_time_base;
3928  }
3929 
3930  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3931  if (ret < 0) {
3932  *duration = tmp;
3933  return tmp_time_base;
3934  }
3935 
3936  return time_base;
3937 }
3938 
/*
 * Rewind an input file to its start for -stream_loop: seek back, flush all
 * active decoders, then estimate the file's total duration so timestamps
 * of the next iteration can be offset past the previous one.
 * Returns the av_seek_frame() result (0 or negative AVERROR).
 */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* second pass: compute per-stream duration; when audio exists only
     * audio streams contribute the last-frame length */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* last audio frame length, exact from the sample count */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* video-only: approximate the last frame by one frame interval */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
3999 
4000 /*
4001  * Return
4002  * - 0 -- one packet was read and processed
4003  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4004  * this function should be called again
4005  * - AVERROR_EOF -- this function should not be called again
4006  */
4007 static int process_input(int file_index)
4008 {
4009  InputFile *ifile = input_files[file_index];
4010  AVFormatContext *is;
4011  InputStream *ist;
4012  AVPacket pkt;
4013  int ret, i, j;
4014  int64_t duration;
4015  int64_t pkt_dts;
4016 
4017  is = ifile->ctx;
4018  ret = get_input_packet(ifile, &pkt);
4019 
4020  if (ret == AVERROR(EAGAIN)) {
4021  ifile->eagain = 1;
4022  return ret;
4023  }
4024  if (ret < 0 && ifile->loop) {
4025  if ((ret = seek_to_start(ifile, is)) < 0)
4026  return ret;
4027  ret = get_input_packet(ifile, &pkt);
4028  if (ret == AVERROR(EAGAIN)) {
4029  ifile->eagain = 1;
4030  return ret;
4031  }
4032  }
4033  if (ret < 0) {
4034  if (ret != AVERROR_EOF) {
4035  print_error(is->filename, ret);
4036  if (exit_on_error)
4037  exit_program(1);
4038  }
4039 
4040  for (i = 0; i < ifile->nb_streams; i++) {
4041  ist = input_streams[ifile->ist_index + i];
4042  if (ist->decoding_needed) {
4043  ret = process_input_packet(ist, NULL, 0);
4044  if (ret>0)
4045  return 0;
4046  }
4047 
4048  /* mark all outputs that don't go through lavfi as finished */
4049  for (j = 0; j < nb_output_streams; j++) {
4050  OutputStream *ost = output_streams[j];
4051 
4052  if (ost->source_index == ifile->ist_index + i &&
4053  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4054  finish_output_stream(ost);
4055  }
4056  }
4057 
4058  ifile->eof_reached = 1;
4059  return AVERROR(EAGAIN);
4060  }
4061 
4062  reset_eagain();
4063 
4064  if (do_pkt_dump) {
4066  is->streams[pkt.stream_index]);
4067  }
4068  /* the following test is needed in case new streams appear
4069  dynamically in stream : we ignore them */
4070  if (pkt.stream_index >= ifile->nb_streams) {
4071  report_new_stream(file_index, &pkt);
4072  goto discard_packet;
4073  }
4074 
4075  ist = input_streams[ifile->ist_index + pkt.stream_index];
4076 
4077  ist->data_size += pkt.size;
4078  ist->nb_packets++;
4079 
4080  if (ist->discard)
4081  goto discard_packet;
4082 
4083  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4084  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4085  exit_program(1);
4086  }
4087 
4088  if (debug_ts) {
4089  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4090  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4094  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4095  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4096  av_ts2str(input_files[ist->file_index]->ts_offset),
4097  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4098  }
4099 
4100  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4101  int64_t stime, stime2;
4102  // Correcting starttime based on the enabled streams
4103  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4104  // so we instead do it here as part of discontinuity handling
4105  if ( ist->next_dts == AV_NOPTS_VALUE
4106  && ifile->ts_offset == -is->start_time
4107  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4108  int64_t new_start_time = INT64_MAX;
4109  for (i=0; i<is->nb_streams; i++) {
4110  AVStream *st = is->streams[i];
4111  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4112  continue;
4113  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4114  }
4115  if (new_start_time > is->start_time) {
4116  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4117  ifile->ts_offset = -new_start_time;
4118  }
4119  }
4120 
4121  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4122  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4123  ist->wrap_correction_done = 1;
4124 
4125  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4126  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4127  ist->wrap_correction_done = 0;
4128  }
4129  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4130  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4131  ist->wrap_correction_done = 0;
4132  }
4133  }
4134 
4135  /* add the stream-global side data to the first packet */
4136  if (ist->nb_packets == 1) {
4137  if (ist->st->nb_side_data)
4139  for (i = 0; i < ist->st->nb_side_data; i++) {
4140  AVPacketSideData *src_sd = &ist->st->side_data[i];
4141  uint8_t *dst_data;
4142 
4143  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4144  continue;
4145  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4146  continue;
4147 
4148  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4149  if (!dst_data)
4150  exit_program(1);
4151 
4152  memcpy(dst_data, src_sd->data, src_sd->size);
4153  }
4154  }
4155 
4156  if (pkt.dts != AV_NOPTS_VALUE)
4157  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4158  if (pkt.pts != AV_NOPTS_VALUE)
4159  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4160 
4161  if (pkt.pts != AV_NOPTS_VALUE)
4162  pkt.pts *= ist->ts_scale;
4163  if (pkt.dts != AV_NOPTS_VALUE)
4164  pkt.dts *= ist->ts_scale;
4165 
4167  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4169  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4170  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4171  int64_t delta = pkt_dts - ifile->last_ts;
4172  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4173  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4174  ifile->ts_offset -= delta;
4176  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4177  delta, ifile->ts_offset);
4178  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4179  if (pkt.pts != AV_NOPTS_VALUE)
4180  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4181  }
4182  }
4183 
4184  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4185  if (pkt.pts != AV_NOPTS_VALUE) {
4186  pkt.pts += duration;
4187  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4188  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4189  }
4190 
4191  if (pkt.dts != AV_NOPTS_VALUE)
4192  pkt.dts += duration;
4193 
4195  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4197  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4198  !copy_ts) {
4199  int64_t delta = pkt_dts - ist->next_dts;
4200  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4201  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4202  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4203  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4204  ifile->ts_offset -= delta;
4206  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4207  delta, ifile->ts_offset);
4208  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4209  if (pkt.pts != AV_NOPTS_VALUE)
4210  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4211  }
4212  } else {
4213  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4214  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4215  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4216  pkt.dts = AV_NOPTS_VALUE;
4217  }
4218  if (pkt.pts != AV_NOPTS_VALUE){
4219  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4220  delta = pkt_pts - ist->next_dts;
4221  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4222  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4223  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4224  pkt.pts = AV_NOPTS_VALUE;
4225  }
4226  }
4227  }
4228  }
4229 
4230  if (pkt.dts != AV_NOPTS_VALUE)
4231  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4232 
4233  if (debug_ts) {
4234  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4236  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4237  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4238  av_ts2str(input_files[ist->file_index]->ts_offset),
4239  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4240  }
4241 
4242  sub2video_heartbeat(ist, pkt.pts);
4243 
4244  process_input_packet(ist, &pkt, 0);
4245 
4246 discard_packet:
4247  av_packet_unref(&pkt);
4248 
4249  return 0;
4250 }
4251 
4252 /**
4253  * Perform a step of transcoding for the specified filter graph.
4254  *
4255  * @param[in] graph filter graph to consider
4256  * @param[out] best_ist input stream where a frame would allow to continue
4257  * @return 0 for success, <0 for error
4258  */
4259 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4260 {
4261  int i, ret;
4262  int nb_requests, nb_requests_max = 0;
4263  InputFilter *ifilter;
4264  InputStream *ist;
4265 
4266  *best_ist = NULL;
4267  ret = avfilter_graph_request_oldest(graph->graph);
4268  if (ret >= 0)
4269  return reap_filters(0);
4270 
4271  if (ret == AVERROR_EOF) {
4272  ret = reap_filters(1);
4273  for (i = 0; i < graph->nb_outputs; i++)
4274  close_output_stream(graph->outputs[i]->ost);
4275  return ret;
4276  }
4277  if (ret != AVERROR(EAGAIN))
4278  return ret;
4279 
4280  for (i = 0; i < graph->nb_inputs; i++) {
4281  ifilter = graph->inputs[i];
4282  ist = ifilter->ist;
4283  if (input_files[ist->file_index]->eagain ||
4284  input_files[ist->file_index]->eof_reached)
4285  continue;
4286  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4287  if (nb_requests > nb_requests_max) {
4288  nb_requests_max = nb_requests;
4289  *best_ist = ist;
4290  }
4291  }
4292 
4293  if (!*best_ist)
4294  for (i = 0; i < graph->nb_outputs; i++)
4295  graph->outputs[i]->ost->unavailable = 1;
4296 
4297  return 0;
4298 }
4299 
4300 /**
4301  * Run a single step of transcoding.
4302  *
4303  * @return 0 for success, <0 for error
4304  */
4305 static int transcode_step(void)
4306 {
4307  OutputStream *ost;
4308  InputStream *ist;
4309  int ret;
4310 
4311  ost = choose_output();
4312  if (!ost) {
4313  if (got_eagain()) {
4314  reset_eagain();
4315  av_usleep(10000);
4316  return 0;
4317  }
4318  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4319  return AVERROR_EOF;
4320  }
4321 
4322  if (ost->filter) {
4323  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4324  return ret;
4325  if (!ist)
4326  return 0;
4327  } else {
4328  av_assert0(ost->source_index >= 0);
4329  ist = input_streams[ost->source_index];
4330  }
4331 
4332  ret = process_input(ist->file_index);
4333  if (ret == AVERROR(EAGAIN)) {
4334  if (input_files[ist->file_index]->eagain)
4335  ost->unavailable = 1;
4336  return 0;
4337  }
4338 
4339  if (ret < 0)
4340  return ret == AVERROR_EOF ? 0 : ret;
4341 
4342  return reap_filters(0);
4343 }
4344 
4345 /*
4346  * The following code is the main loop of the file converter
4347  */
4348 static int transcode(void)
4349 {
4350  int ret, i;
4351  AVFormatContext *os;
4352  OutputStream *ost;
4353  InputStream *ist;
4354  int64_t timer_start;
4355  int64_t total_packets_written = 0;
4356 
4357  ret = transcode_init();
4358  if (ret < 0)
4359  goto fail;
4360 
4361  if (stdin_interaction) {
4362  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4363  }
4364 
4365  timer_start = av_gettime_relative();
4366 
4367 #if HAVE_PTHREADS
4368  if ((ret = init_input_threads()) < 0)
4369  goto fail;
4370 #endif
4371 
4372  while (!received_sigterm) {
4373  int64_t cur_time= av_gettime_relative();
4374 
4375  /* if 'q' pressed, exits */
4376  if (stdin_interaction)
4377  if (check_keyboard_interaction(cur_time) < 0)
4378  break;
4379 
4380  /* check if there's any stream where output is still needed */
4381  if (!need_output()) {
4382  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4383  break;
4384  }
4385 
4386  ret = transcode_step();
4387  if (ret < 0 && ret != AVERROR_EOF) {
4388  char errbuf[128];
4389  av_strerror(ret, errbuf, sizeof(errbuf));
4390 
4391  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4392  break;
4393  }
4394 
4395  /* dump report by using the output first video and audio streams */
4396  print_report(0, timer_start, cur_time);
4397  }
4398 #if HAVE_PTHREADS
4399  free_input_threads();
4400 #endif
4401 
4402  /* at the end of stream, we must flush the decoder buffers */
4403  for (i = 0; i < nb_input_streams; i++) {
4404  ist = input_streams[i];
4405  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4406  process_input_packet(ist, NULL, 0);
4407  }
4408  }
4409  flush_encoders();
4410 
4411  term_exit();
4412 
4413  /* write the trailer if needed and close file */
4414  for (i = 0; i < nb_output_files; i++) {
4415  os = output_files[i]->ctx;
4416  if (!output_files[i]->header_written) {
4418  "Nothing was written into output file %d (%s), because "
4419  "at least one of its streams received no packets.\n",
4420  i, os->filename);
4421  continue;
4422  }
4423  if ((ret = av_write_trailer(os)) < 0) {
4424  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4425  if (exit_on_error)
4426  exit_program(1);
4427  }
4428  }
4429 
4430  /* dump report by using the first video and audio streams */
4431  print_report(1, timer_start, av_gettime_relative());
4432 
4433  /* close each encoder */
4434  for (i = 0; i < nb_output_streams; i++) {
4435  ost = output_streams[i];
4436  if (ost->encoding_needed) {
4437  av_freep(&ost->enc_ctx->stats_in);
4438  }
4439  total_packets_written += ost->packets_written;
4440  }
4441 
4442  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4443  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4444  exit_program(1);
4445  }
4446 
4447  /* close each decoder */
4448  for (i = 0; i < nb_input_streams; i++) {
4449  ist = input_streams[i];
4450  if (ist->decoding_needed) {
4451  avcodec_close(ist->dec_ctx);
4452  if (ist->hwaccel_uninit)
4453  ist->hwaccel_uninit(ist->dec_ctx);
4454  }
4455  }
4456 
4458 
4459  /* finished ! */
4460  ret = 0;
4461 
4462  fail:
4463 #if HAVE_PTHREADS
4464  free_input_threads();
4465 #endif
4466 
4467  if (output_streams) {
4468  for (i = 0; i < nb_output_streams; i++) {
4469  ost = output_streams[i];
4470  if (ost) {
4471  if (ost->logfile) {
4472  if (fclose(ost->logfile))
4474  "Error closing logfile, loss of information possible: %s\n",
4475  av_err2str(AVERROR(errno)));
4476  ost->logfile = NULL;
4477  }
4478  av_freep(&ost->forced_kf_pts);
4479  av_freep(&ost->apad);
4480  av_freep(&ost->disposition);
4481  av_dict_free(&ost->encoder_opts);
4482  av_dict_free(&ost->sws_dict);
4483  av_dict_free(&ost->swr_opts);
4484  av_dict_free(&ost->resample_opts);
4485  }
4486  }
4487  }
4488  return ret;
4489 }
4490 
4491 
/* Return the user CPU time consumed by this process, in microseconds.
 * Falls back to wall-clock time where no CPU-time API is available. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;

    getrusage(RUSAGE_SELF, &rusage);
    /* tv_sec/tv_usec -> microseconds */
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME is in 100 ns units; divide by 10 for microseconds */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    /* no CPU-time API: approximate with relative wall-clock time */
    return av_gettime_relative();
#endif
}
4509 
/* Return the peak memory usage of this process in bytes, or 0 when no
 * suitable API is available on this platform. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes; convert to bytes */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    /* NOTE(review): returns peak pagefile (commit) usage, not peak RSS —
     * the closest available counter in this struct */
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4527 
/* Log callback that discards all messages (ignores every argument);
 * installable via av_log_set_callback() to silence library logging. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4531 
4532 int main(int argc, char **argv)
4533 {
4534  int i, ret;
4535  int64_t ti;
4536 
4537  init_dynload();
4538 
4540 
4541  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4542 
4544  parse_loglevel(argc, argv, options);
4545 
4546  if(argc>1 && !strcmp(argv[1], "-d")){
4547  run_as_daemon=1;
4549  argc--;
4550  argv++;
4551  }
4552 
4554 #if CONFIG_AVDEVICE
4556 #endif
4558  av_register_all();
4560 
4561  show_banner(argc, argv, options);
4562 
4563  /* parse options and open all input/output files */
4564  ret = ffmpeg_parse_options(argc, argv);
4565  if (ret < 0)
4566  exit_program(1);
4567 
4568  if (nb_output_files <= 0 && nb_input_files == 0) {
4569  show_usage();
4570  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4571  exit_program(1);
4572  }
4573 
4574  /* file converter / grab */
4575  if (nb_output_files <= 0) {
4576  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4577  exit_program(1);
4578  }
4579 
4580 // if (nb_input_files == 0) {
4581 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4582 // exit_program(1);
4583 // }
4584 
4585  for (i = 0; i < nb_output_files; i++) {
4586  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4587  want_sdp = 0;
4588  }
4589 
4590  current_time = ti = getutime();
4591  if (transcode() < 0)
4592  exit_program(1);
4593  ti = getutime() - ti;
4594  if (do_benchmark) {
4595  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4596  }
4597  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4600  exit_program(69);
4601 
4603  return main_return_code;
4604 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1543
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:281
int nb_bitstream_filters
Definition: ffmpeg.h:424
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:895
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:113
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2939
enum AVChromaLocation chroma_location
Definition: avcodec.h:4070
int got_output
Definition: ffmpeg.h:309
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:36
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1882
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1060
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1995
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:413
const struct AVCodec * codec
Definition: avcodec.h:1685
Definition: ffmpeg.h:390
AVRational framerate
Definition: avcodec.h:3375
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4061
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:919
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:336
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:520
void term_init(void)
Definition: ffmpeg.c:368
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5762
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:240
int nb_outputs
Definition: ffmpeg.h:257
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:470
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:267
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2266
int resample_channels
Definition: ffmpeg.h:304
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2837
void term_exit(void)
Definition: ffmpeg.c:310
int stream_copy
Definition: ffmpeg.h:475
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1225
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3922