FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by the shared cmdutils code (banner,
 * version/help output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Output file for per-frame video statistics (-vstats / -vstats_file);
 * NULL until such an option is used. */
static FILE *vstats_file;
113 
/* Names of the variables usable in a -force_key_frames expression,
 * in the order the expression evaluator expects them; the list is
 * NULL-terminated. "n"/"t" are the current frame number/time,
 * "prev_forced_n"/"prev_forced_t" describe the last forced keyframe. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
static int run_as_daemon = 0;        /* nonzero when running detached from a tty -- setter not in this chunk; verify */
static int nb_frames_dup = 0;        /* total frames duplicated for A/V sync */
static unsigned dup_warning = 1000;  /* next nb_frames_dup threshold that triggers a warning (grows x10, see do_video_out) */
static int nb_frames_drop = 0;       /* total frames dropped for A/V sync */
static int64_t decode_error_stat[2]; /* decode outcome counters; index semantics not visible in this chunk -- verify */
static unsigned nb_output_dumped = 0;

static int want_sdp = 1;

static uint8_t *subtitle_out;        /* scratch buffer for avcodec_encode_subtitle() (see do_subtitle_out) */

/* NOTE(review): several declarations that originally sat between these
 * lines (e.g. the "current_time" BenchmarkTimeStamps used by
 * update_benchmark()) were lost when this listing was extracted. */
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;  /* tty state saved by term_init(), restored on exit */
static int restore_tty;        /* nonzero once oldtty holds a valid saved state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
/* Called whenever a frame is read from the input file: re-send the
 * current sub2video frame on every sub2video stream of the same file
 * so that filters waiting on a subtitle input do not stall the video. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* only push when at least one downstream filter actually starved */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
312 
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
/* Async-signal-safe part of terminal restoration: put the tty back into
 * the mode saved by term_init(), if it was changed. Called from both
 * normal exit and the signal handler, so it must only use
 * async-signal-safe functions (tcsetattr is one). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
/* Signal/exit bookkeeping shared with the handlers below. */
static volatile int received_sigterm = 0;     /* last termination signal received */
static volatile int received_nb_signals = 0;  /* count of termination signals received */
/* Restored: this declaration was dropped in extraction but the variable
 * is read via atomic_load() in ffmpeg_cleanup() -- verify against upstream. */
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;        /* set once cleanup finished (polled by CtrlHandler) */
static int main_return_code = 0;              /* process exit code, set on mux errors */
349 
350 static void
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
#ifdef __linux__
/* On Linux install handlers through the caller-provided "action"
 * struct via sigaction() (so SA_RESTART and the filled signal mask set
 * up in term_init() apply); elsewhere fall back to plain signal(). */
#define SIGNAL(sig, func) \
    do { \
        action.sa_handler = func; \
        sigaction(sig, &action, NULL); \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
407 
/* Install termination-signal handlers and, when a tty is available,
 * switch the terminal into a raw-ish mode so read_key() can fetch
 * single keypresses; the original mode is saved for term_exit(). */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    /* NOTE(review): an opening "if (...) {" line was lost when this
       listing was extracted -- the orphaned "}" after the SIGQUIT line
       below closes it. Presumably the tty is only reconfigured when
       interactive input is wanted; verify against upstream ffmpeg.c. */
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        oldtty = tty;
        restore_tty = 1;   /* remember to restore in term_exit_sigsafe() */

        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;    /* read() returns after 1 byte, no timeout */
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
455 
/* read a key without blocking */
/* Poll stdin for a single keypress. Returns the byte read, or -1 when
 * no input is pending (or on error). POSIX builds poll fd 0 with a
 * zero-timeout select(); Windows builds use PeekNamedPipe when stdin is
 * a pipe (GUI wrappers) and kbhit()/getch() on a console. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;   /* zero timeout: pure poll, never blocks */
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;     /* 0 on EOF, -1 on read error */
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails on pipes, which is how we detect them */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
507 
/* AVIOInterruptCallback used for blocking I/O: a non-zero return value
 * makes lavf abort the pending operation.
 * NOTE(review): the body's return statement was lost when this listing
 * was extracted -- upstream returns whether termination signals were
 * received (so I/O aborts once the user interrupted); verify against
 * upstream ffmpeg.c before compiling. */
static int decode_interrupt_cb(void *ctx)
{
}
512 
514 
/* Global cleanup hook: frees filtergraphs, closes output files, frees
 * per-stream state, and logs how the run ended. "ret" is the pending
 * program exit code.
 *
 * NOTE(review): this listing was extracted with many source lines
 * dropped (declarations of "s"/"ost"/"ist", several frees, fifo reads,
 * and whole sub-blocks are missing -- marked below). Re-diff against
 * upstream ffmpeg.c before treating this block as compilable. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* drain queued decoded frames */
            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                /* [extraction gap: av_fifo_generic_read(...) head and the
                   frame free are missing] */
                sizeof(frame), NULL);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    /* [extraction gap: avsubtitle_free(&sub) presumably
                       followed -- verify] */
                }
                av_fifo_freep(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

    }

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* [extraction gap: declaration of "s" (AVFormatContext *) missing] */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

    }
    for (i = 0; i < nb_output_streams; i++) {
        /* [extraction gap: declaration of "ost" and most per-output-stream
           frees are missing] */
        if (!ost)
            continue;

        av_freep(&ost->avfilter);

        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket *pkt;
                /* [extraction gap: fifo read and packet free missing] */
            }
        }

    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        /* [extraction gap: input-file cleanup body missing] */
    }
    for (i = 0; i < nb_input_streams; i++) {
        /* [extraction gap: declaration of "ist" missing] */
        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* [extraction gap: av_log(...) call head missing; only its
               arguments survive] */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;    /* unblocks the Windows CtrlHandler stall loop */
}
675 
677 {
678  AVDictionaryEntry *t = NULL;
679 
680  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
682  }
683 }
684 
686 {
688  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
689  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
690  exit_program(1);
691  }
692 }
693 
/* Abort after a codec was rejected for being experimental.
 * NOTE(review): both parameters are unused here -- presumably the
 * caller logs the codec details before calling; verify at call sites. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
698 
699 static void update_benchmark(const char *fmt, ...)
700 {
701  if (do_benchmark_all) {
703  va_list va;
704  char buf[1024];
705 
706  if (fmt) {
707  va_start(va, fmt);
708  vsnprintf(buf, sizeof(buf), fmt, va);
709  va_end(va);
711  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
714  t.real_usec - current_time.real_usec, buf);
715  }
716  current_time = t;
717  }
718 }
719 
721 {
722  int i;
723  for (i = 0; i < nb_output_streams; i++) {
724  OutputStream *ost2 = output_streams[i];
725  ost2->finished |= ost == ost2 ? this_stream : others;
726  }
727 }
728 
/* Deliver one packet to the muxer: enforce -frames limits, queue the
 * packet while the muxer header is not yet written, sanitize
 * timestamps, collect stats, and finally interleave-write it.
 * "unqueue" is nonzero when the packet comes back out of the muxing
 * queue (so it is not counted twice).
 *
 * NOTE(review): numerous source lines were dropped when this listing
 * was extracted (marked below); re-diff against upstream ffmpeg.c
 * before treating this block as compilable. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            /* [extraction gap: packet unref before returning missing] */
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket *tmp_pkt;
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            unsigned int are_we_over_size =
            /* [extraction gap: size computation and the new_size
               expression are incomplete] */
            int new_size = are_we_over_size ?

            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* [extraction gap: av_log() call head missing] */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        /* [extraction gap: the call whose result is checked here is missing] */
        if (ret < 0)
            exit_program(1);
        tmp_pkt = av_packet_alloc();
        if (!tmp_pkt)
            exit_program(1);
        av_packet_move_ref(tmp_pkt, pkt);
        ost->muxing_queue_data_size += tmp_pkt->size;
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* [extraction gap: the VSYNC/audio-sync condition guarding this
       assignment is missing] */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /* [extraction gap: the video-only block head and the
       av_packet_get_side_data() call declaring "sd" are missing] */
    int i;
    NULL);
    ost->quality = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (ost->frame_rate.num && ost->is_cfr) {
        if (pkt->duration > 0)
            av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
        /* [extraction gap: the duration rescale call head is missing] */
        ost->mux_timebase);
        }
    }

    /* [extraction gap: rescale of pkt timestamps to the stream time base
       is missing] */

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* [extraction gap: the surrounding if() condition lines are
           partly missing here] */
            pkt->dts != AV_NOPTS_VALUE &&
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       /* [extraction gap: argument line missing] */
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    /* [extraction gap: pkt->stream_index assignment missing] */

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* [extraction gap: several argument lines missing] */
               pkt->size
        );
    }

    /* [extraction gap: the av_interleaved_write_frame() call is missing] */
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* [extraction gap: stream-closing call missing] */
    }
}
874 
876 {
878 
880  if (of->shortest) {
882  of->recording_time = FFMIN(of->recording_time, end);
883  }
884 }
885 
886 /*
887  * Send a single packet to the output, applying any bitstream filters
888  * associated with the output stream. This may result in any number
889  * of packets actually being written, depending on what bitstream
890  * filters are applied. The supplied packet is consumed and will be
891  * blank (as if newly-allocated) when this function returns.
892  *
893  * If eof is set, instead indicate EOF to all bitstream filters and
894  * therefore flush any delayed packets to the output. A blank packet
895  * must be supplied in this case.
896  */
898  OutputStream *ost, int eof)
899 {
900  int ret = 0;
901 
902  /* apply the output bitstream filters */
903  if (ost->bsf_ctx) {
904  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
905  if (ret < 0)
906  goto finish;
907  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
908  write_packet(of, pkt, ost, 0);
909  if (ret == AVERROR(EAGAIN))
910  ret = 0;
911  } else if (!eof)
912  write_packet(of, pkt, ost, 0);
913 
914 finish:
915  if (ret < 0 && ret != AVERROR_EOF) {
916  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
917  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
918  if(exit_on_error)
919  exit_program(1);
920  }
921 }
922 
924 {
926 
927  if (of->recording_time != INT64_MAX &&
929  AV_TIME_BASE_Q) >= 0) {
931  return 0;
932  }
933  return 1;
934 }
935 
937  AVFrame *frame)
938 {
939  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
940  AVCodecContext *enc = ost->enc_ctx;
941  if (!frame || frame->pts == AV_NOPTS_VALUE ||
942  !enc || !ost->filter || !ost->filter->graph->graph)
943  goto early_exit;
944 
945  {
947 
948  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
950  AVRational tb = enc->time_base;
951  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
952 
953  tb.den <<= extra_bits;
954  float_pts =
955  av_rescale_q(frame->pts, filter_tb, tb) -
957  float_pts /= 1 << extra_bits;
958  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
959  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
960 
961  frame->pts =
962  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
964  }
965 
966 early_exit:
967 
968  if (debug_ts) {
969  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
970  frame ? av_ts2str(frame->pts) : "NULL",
971  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
972  float_pts,
973  enc ? enc->time_base.num : -1,
974  enc ? enc->time_base.den : -1);
975  }
976 
977  return float_pts;
978 }
979 
981  char *error, int error_len);
982 
984  unsigned int fatal)
985 {
986  int ret = AVERROR_BUG;
987  char error[1024] = {0};
988 
989  if (ost->initialized)
990  return 0;
991 
992  ret = init_output_stream(ost, frame, error, sizeof(error));
993  if (ret < 0) {
994  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
996 
997  if (fatal)
998  exit_program(1);
999  }
1000 
1001  return ret;
1002 }
1003 
1005  AVFrame *frame)
1006 {
1007  AVCodecContext *enc = ost->enc_ctx;
1008  AVPacket *pkt = ost->pkt;
1009  int ret;
1010 
1012 
1013  if (!check_recording_time(ost))
1014  return;
1015 
1016  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1017  frame->pts = ost->sync_opts;
1018  ost->sync_opts = frame->pts + frame->nb_samples;
1019  ost->samples_encoded += frame->nb_samples;
1020  ost->frames_encoded++;
1021 
1023  if (debug_ts) {
1024  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1025  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1026  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1027  enc->time_base.num, enc->time_base.den);
1028  }
1029 
1030  ret = avcodec_send_frame(enc, frame);
1031  if (ret < 0)
1032  goto error;
1033 
1034  while (1) {
1036  ret = avcodec_receive_packet(enc, pkt);
1037  if (ret == AVERROR(EAGAIN))
1038  break;
1039  if (ret < 0)
1040  goto error;
1041 
1042  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1043 
1045 
1046  if (debug_ts) {
1047  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1048  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1051  }
1052 
1053  output_packet(of, pkt, ost, 0);
1054  }
1055 
1056  return;
1057 error:
1058  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1059  exit_program(1);
1060 }
1061 
1062 static void do_subtitle_out(OutputFile *of,
1063  OutputStream *ost,
1064  AVSubtitle *sub)
1065 {
1066  int subtitle_out_max_size = 1024 * 1024;
1067  int subtitle_out_size, nb, i;
1068  AVCodecContext *enc;
1069  AVPacket *pkt = ost->pkt;
1070  int64_t pts;
1071 
1072  if (sub->pts == AV_NOPTS_VALUE) {
1073  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1074  if (exit_on_error)
1075  exit_program(1);
1076  return;
1077  }
1078 
1079  enc = ost->enc_ctx;
1080 
1081  if (!subtitle_out) {
1082  subtitle_out = av_malloc(subtitle_out_max_size);
1083  if (!subtitle_out) {
1084  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1085  exit_program(1);
1086  }
1087  }
1088 
1089  /* Note: DVB subtitle need one packet to draw them and one other
1090  packet to clear them */
1091  /* XXX: signal it in the codec context ? */
1092  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1093  nb = 2;
1094  else
1095  nb = 1;
1096 
1097  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1098  pts = sub->pts;
1101  for (i = 0; i < nb; i++) {
1102  unsigned save_num_rects = sub->num_rects;
1103 
1105  if (!check_recording_time(ost))
1106  return;
1107 
1108  sub->pts = pts;
1109  // start_display_time is required to be 0
1110  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1111  sub->end_display_time -= sub->start_display_time;
1112  sub->start_display_time = 0;
1113  if (i == 1)
1114  sub->num_rects = 0;
1115 
1116  ost->frames_encoded++;
1117 
1118  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1119  subtitle_out_max_size, sub);
1120  if (i == 1)
1121  sub->num_rects = save_num_rects;
1122  if (subtitle_out_size < 0) {
1123  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1124  exit_program(1);
1125  }
1126 
1128  pkt->data = subtitle_out;
1129  pkt->size = subtitle_out_size;
1131  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1132  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1133  /* XXX: the pts correction is handled here. Maybe handling
1134  it in the codec would be better */
1135  if (i == 0)
1136  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1137  else
1138  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139  }
1140  pkt->dts = pkt->pts;
1141  output_packet(of, pkt, ost, 0);
1142  }
1143 }
1144 
1145 static void do_video_out(OutputFile *of,
1146  OutputStream *ost,
1147  AVFrame *next_picture)
1148 {
1149  int ret, format_video_sync;
1150  AVPacket *pkt = ost->pkt;
1151  AVCodecContext *enc = ost->enc_ctx;
1152  AVRational frame_rate;
1153  int nb_frames, nb0_frames, i;
1154  double delta, delta0;
1155  double duration = 0;
1156  double sync_ipts = AV_NOPTS_VALUE;
1157  int frame_size = 0;
1158  InputStream *ist = NULL;
1160 
1161  init_output_stream_wrapper(ost, next_picture, 1);
1162  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1163 
1164  if (ost->source_index >= 0)
1166 
1167  frame_rate = av_buffersink_get_frame_rate(filter);
1168  if (frame_rate.num > 0 && frame_rate.den > 0)
1169  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1170 
1171  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1173 
1174  if (!ost->filters_script &&
1175  !ost->filters &&
1176  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1177  next_picture &&
1178  ist &&
1179  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1180  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1181  }
1182 
1183  if (!next_picture) {
1184  //end, flushing
1185  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1186  ost->last_nb0_frames[1],
1187  ost->last_nb0_frames[2]);
1188  } else {
1189  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1190  delta = delta0 + duration;
1191 
1192  /* by default, we output a single frame */
1193  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1194  nb_frames = 1;
1195 
1196  format_video_sync = video_sync_method;
1197  if (format_video_sync == VSYNC_AUTO) {
1198  if(!strcmp(of->ctx->oformat->name, "avi")) {
1199  format_video_sync = VSYNC_VFR;
1200  } else
1201  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1202  if ( ist
1203  && format_video_sync == VSYNC_CFR
1204  && input_files[ist->file_index]->ctx->nb_streams == 1
1205  && input_files[ist->file_index]->input_ts_offset == 0) {
1206  format_video_sync = VSYNC_VSCFR;
1207  }
1208  if (format_video_sync == VSYNC_CFR && copy_ts) {
1209  format_video_sync = VSYNC_VSCFR;
1210  }
1211  }
1212  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1213 
1214  if (delta0 < 0 &&
1215  delta > 0 &&
1216  format_video_sync != VSYNC_PASSTHROUGH &&
1217  format_video_sync != VSYNC_DROP) {
1218  if (delta0 < -0.6) {
1219  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1220  } else
1221  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1222  sync_ipts = ost->sync_opts;
1223  duration += delta0;
1224  delta0 = 0;
1225  }
1226 
1227  switch (format_video_sync) {
1228  case VSYNC_VSCFR:
1229  if (ost->frame_number == 0 && delta0 >= 0.5) {
1230  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1231  delta = duration;
1232  delta0 = 0;
1233  ost->sync_opts = llrint(sync_ipts);
1234  }
1235  case VSYNC_CFR:
1236  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1237  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1238  nb_frames = 0;
1239  } else if (delta < -1.1)
1240  nb_frames = 0;
1241  else if (delta > 1.1) {
1242  nb_frames = lrintf(delta);
1243  if (delta0 > 1.1)
1244  nb0_frames = llrintf(delta0 - 0.6);
1245  }
1246  break;
1247  case VSYNC_VFR:
1248  if (delta <= -0.6)
1249  nb_frames = 0;
1250  else if (delta > 0.6)
1251  ost->sync_opts = llrint(sync_ipts);
1252  break;
1253  case VSYNC_DROP:
1254  case VSYNC_PASSTHROUGH:
1255  ost->sync_opts = llrint(sync_ipts);
1256  break;
1257  default:
1258  av_assert0(0);
1259  }
1260  }
1261 
1262  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1263  nb0_frames = FFMIN(nb0_frames, nb_frames);
1264 
1265  memmove(ost->last_nb0_frames + 1,
1267  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1268  ost->last_nb0_frames[0] = nb0_frames;
1269 
1270  if (nb0_frames == 0 && ost->last_dropped) {
1271  nb_frames_drop++;
1273  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1275  }
1276  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1277  if (nb_frames > dts_error_threshold * 30) {
1278  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1279  nb_frames_drop++;
1280  return;
1281  }
1282  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1283  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1284  if (nb_frames_dup > dup_warning) {
1285  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1286  dup_warning *= 10;
1287  }
1288  }
1289  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1290 
1291  /* duplicates frame if needed */
1292  for (i = 0; i < nb_frames; i++) {
1293  AVFrame *in_picture;
1294  int forced_keyframe = 0;
1295  double pts_time;
1296 
1297  if (i < nb0_frames && ost->last_frame) {
1298  in_picture = ost->last_frame;
1299  } else
1300  in_picture = next_picture;
1301 
1302  if (!in_picture)
1303  return;
1304 
1305  in_picture->pts = ost->sync_opts;
1306 
1307  if (!check_recording_time(ost))
1308  return;
1309 
1310  in_picture->quality = enc->global_quality;
1311  in_picture->pict_type = 0;
1312 
1314  in_picture->pts != AV_NOPTS_VALUE)
1315  ost->forced_kf_ref_pts = in_picture->pts;
1316 
1317  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1318  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1320  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1321  ost->forced_kf_index++;
1322  forced_keyframe = 1;
1323  } else if (ost->forced_keyframes_pexpr) {
1324  double res;
1328  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1334  res);
1335  if (res) {
1336  forced_keyframe = 1;
1342  }
1343 
1345  } else if ( ost->forced_keyframes
1346  && !strncmp(ost->forced_keyframes, "source", 6)
1347  && in_picture->key_frame==1
1348  && !i) {
1349  forced_keyframe = 1;
1350  }
1351 
1352  if (forced_keyframe) {
1353  in_picture->pict_type = AV_PICTURE_TYPE_I;
1354  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1355  }
1356 
1358  if (debug_ts) {
1359  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1360  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1361  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1362  enc->time_base.num, enc->time_base.den);
1363  }
1364 
1365  ost->frames_encoded++;
1366 
1367  ret = avcodec_send_frame(enc, in_picture);
1368  if (ret < 0)
1369  goto error;
1370  // Make sure Closed Captions will not be duplicated
1372 
1373  while (1) {
1375  ret = avcodec_receive_packet(enc, pkt);
1376  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1377  if (ret == AVERROR(EAGAIN))
1378  break;
1379  if (ret < 0)
1380  goto error;
1381 
1382  if (debug_ts) {
1383  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1384  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1387  }
1388 
1390  pkt->pts = ost->sync_opts;
1391 
1393 
1394  if (debug_ts) {
1395  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1396  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1399  }
1400 
1401  frame_size = pkt->size;
1402  output_packet(of, pkt, ost, 0);
1403 
1404  /* if two pass, output log */
1405  if (ost->logfile && enc->stats_out) {
1406  fprintf(ost->logfile, "%s", enc->stats_out);
1407  }
1408  }
1409  ost->sync_opts++;
1410  /*
1411  * For video, number of frames in == number of packets out.
1412  * But there may be reordering, so we can't throw away frames on encoder
1413  * flush, we need to limit them here, before they go into encoder.
1414  */
1415  ost->frame_number++;
1416 
1417  if (vstats_filename && frame_size)
1419  }
1420 
1421  if (!ost->last_frame)
1424  if (next_picture && ost->last_frame)
1425  av_frame_ref(ost->last_frame, next_picture);
1426  else
1428 
1429  return;
1430 error:
1431  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1432  exit_program(1);
1433 }
1434 
1435 static double psnr(double d)
1436 {
1437  return -10.0 * log10(d);
1438 }
1439 
/* NOTE(review): this listing was extracted from a doxygen page and several
 * source lines were dropped, including the function signature (original line
 * 1440) and the computation of ti1 (line 1471). Presumably this is
 * do_video_stats(OutputStream *ost, int frame_size), the -vstats per-frame
 * logger — confirm against the complete ffmpeg.c before editing. */
1441 {
1442  AVCodecContext *enc;
1443  int frame_number;
1444  double ti1, bitrate, avg_bitrate;
1445 
1446  /* this is executed just the first time do_video_stats is called */
1447  if (!vstats_file) {
1448  vstats_file = fopen(vstats_filename, "w");
1449  if (!vstats_file) {
1450  perror("fopen");
1451  exit_program(1);
1452  }
1453  }
1454 
1455  enc = ost->enc_ctx;
1456  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1457  frame_number = ost->st->nb_frames;
/* Format differs between vstats versions: v2 adds output/stream indices. */
1458  if (vstats_version <= 1) {
1459  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1460  ost->quality / (float)FF_QP2LAMBDA);
1461  } else {
1462  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1463  ost->quality / (float)FF_QP2LAMBDA);
1464  }
1465 
1466  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1467  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1468 
1469  fprintf(vstats_file,"f_size= %6d ", frame_size);
1470  /* compute pts value */
/* NOTE(review): the assignment of ti1 (line 1471) is missing from this
 * excerpt; ti1 is clamped below to avoid division by ~0 in avg_bitrate. */
1472  if (ti1 < 0.01)
1473  ti1 = 0.01;
1474 
1475  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1476  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1477  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1478  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1479  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1480  }
1481 }
1482 
/* NOTE(review): the signature (original line 1483) and several interior lines
 * (1485, 1488, 1492) were dropped by the extraction. From the visible body
 * this appears to finish an output stream and, when -shortest was given,
 * iterate over every stream of the same output file — presumably to finish
 * them as well; confirm against the complete ffmpeg.c. */
1484 {
1486  int i;
1487 
1489 
1490  if (of->shortest) {
1491  for (i = 0; i < of->ctx->nb_streams; i++)
1493  }
1494 }
1495 
1496 /**
1497  * Get and encode new output from any of the filtergraphs, without causing
1498  * activity.
1499  *
1500  * @return 0 for success, <0 for severe errors
1501  */
/* NOTE(review): doxygen extraction dropped several lines from this body
 * (the ost/of/filter declarations around original lines 1509-1511, the flag
 * argument of av_buffersink_get_frame_flags at 1541, and parts of two av_log
 * calls). Do not edit without consulting the complete ffmpeg.c. */
static int reap_filters(int flush)
1503 {
1504  AVFrame *filtered_frame = NULL;
1505  int i;
1506 
1507  /* Reap all buffers present in the buffer sinks */
1508  for (i = 0; i < nb_output_streams; i++) {
1512  AVCodecContext *enc = ost->enc_ctx;
1513  int ret = 0;
1514 
1515  if (!ost->filter || !ost->filter->graph->graph)
1516  continue;
1517  filter = ost->filter->filter;
1518 
1519  /*
1520  * Unlike video, with audio the audio frame size matters.
1521  * Currently we are fully reliant on the lavfi filter chain to
1522  * do the buffering deed for us, and thus the frame size parameter
1523  * needs to be set accordingly. Where does one get the required
1524  * frame size? From the initialized AVCodecContext of an audio
1525  * encoder. Thus, if we have gotten to an audio stream, initialize
1526  * the encoder earlier than receiving the first AVFrame.
1527  */
1530 
/* Lazily allocate the per-stream packet and frame used for draining sinks. */
1531  if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1532  return AVERROR(ENOMEM);
1533  }
1534  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1535  return AVERROR(ENOMEM);
1536  }
1537  filtered_frame = ost->filtered_frame;
1538 
1539  while (1) {
1540  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1542  if (ret < 0) {
1543  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1545  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1546  } else if (flush && ret == AVERROR_EOF) {
/* On flush, push a NULL frame so the video path can emit trailing frames. */
1548  do_video_out(of, ost, NULL);
1549  }
1550  break;
1551  }
1552  if (ost->finished) {
1553  av_frame_unref(filtered_frame);
1554  continue;
1555  }
1556 
1557  switch (av_buffersink_get_type(filter)) {
1558  case AVMEDIA_TYPE_VIDEO:
1559  if (!ost->frame_aspect_ratio.num)
1560  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1561 
1562  do_video_out(of, ost, filtered_frame);
1563  break;
1564  case AVMEDIA_TYPE_AUDIO:
1565  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1566  enc->channels != filtered_frame->channels) {
1568  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1569  break;
1570  }
1571  do_audio_out(of, ost, filtered_frame);
1572  break;
1573  default:
1574  // TODO support subtitle filters
1575  av_assert0(0);
1576  }
1577 
1578  av_frame_unref(filtered_frame);
1579  }
1580  }
1581 
1582  return 0;
1583 }
1584 
/* Print the end-of-run statistics: per-type byte totals, muxing overhead,
 * and verbose per-stream demux/mux counts; warn when nothing was encoded.
 * NOTE(review): the extraction dropped a few lines (e.g. the ost declaration
 * around original line 1595, the pass1 check at 1604-1605, declarations at
 * 1667-1668, and part of the "packets muxed" av_log at 1684) — consult the
 * complete ffmpeg.c before editing. */
static void print_final_stats(int64_t total_size)
1586 {
1587  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1588  uint64_t subtitle_size = 0;
1589  uint64_t data_size = 0;
1590  float percent = -1.0;
1591  int i, j;
1592  int pass1_used = 1;
1593 
/* Accumulate per-media-type byte counts across all output streams. */
1594  for (i = 0; i < nb_output_streams; i++) {
1596  switch (ost->enc_ctx->codec_type) {
1597  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1598  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1599  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1600  default: other_size += ost->data_size; break;
1601  }
1602  extra_size += ost->enc_ctx->extradata_size;
1603  data_size += ost->data_size;
1606  pass1_used = 0;
1607  }
1608 
/* Muxing overhead as a percentage of the raw payload size. */
1609  if (data_size && total_size>0 && total_size >= data_size)
1610  percent = 100.0 * (total_size - data_size) / data_size;
1611 
1612  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1613  video_size / 1024.0,
1614  audio_size / 1024.0,
1615  subtitle_size / 1024.0,
1616  other_size / 1024.0,
1617  extra_size / 1024.0);
1618  if (percent >= 0.0)
1619  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1620  else
1621  av_log(NULL, AV_LOG_INFO, "unknown");
1622  av_log(NULL, AV_LOG_INFO, "\n");
1623 
1624  /* print verbose per-stream stats */
1625  for (i = 0; i < nb_input_files; i++) {
1626  InputFile *f = input_files[i];
1627  uint64_t total_packets = 0, total_size = 0;
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1630  i, f->ctx->url);
1631 
1632  for (j = 0; j < f->nb_streams; j++) {
1633  InputStream *ist = input_streams[f->ist_index + j];
1634  enum AVMediaType type = ist->dec_ctx->codec_type;
1635 
1636  total_size += ist->data_size;
1637  total_packets += ist->nb_packets;
1638 
1639  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1640  i, j, media_type_string(type));
1641  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1642  ist->nb_packets, ist->data_size);
1643 
1644  if (ist->decoding_needed) {
1645  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1646  ist->frames_decoded);
1647  if (type == AVMEDIA_TYPE_AUDIO)
1648  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1649  av_log(NULL, AV_LOG_VERBOSE, "; ");
1650  }
1651 
1652  av_log(NULL, AV_LOG_VERBOSE, "\n");
1653  }
1654 
1655  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1656  total_packets, total_size);
1657  }
1658 
1659  for (i = 0; i < nb_output_files; i++) {
1660  OutputFile *of = output_files[i];
1661  uint64_t total_packets = 0, total_size = 0;
1662 
1663  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1664  i, of->ctx->url);
1665 
1666  for (j = 0; j < of->ctx->nb_streams; j++) {
1669 
1670  total_size += ost->data_size;
1671  total_packets += ost->packets_written;
1672 
1673  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1674  i, j, media_type_string(type));
1675  if (ost->encoding_needed) {
1676  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1677  ost->frames_encoded);
1678  if (type == AVMEDIA_TYPE_AUDIO)
1679  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1680  av_log(NULL, AV_LOG_VERBOSE, "; ");
1681  }
1682 
1683  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1685 
1686  av_log(NULL, AV_LOG_VERBOSE, "\n");
1687  }
1688 
1689  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1690  total_packets, total_size);
1691  }
1692  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1693  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1694  if (pass1_used) {
1695  av_log(NULL, AV_LOG_WARNING, "\n");
1696  } else {
1697  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1698  }
1699  }
1700 }
1701 
/* Emit the periodic (and final) progress line: frame count, fps, q, size,
 * time, bitrate, dup/drop counts and speed — to stderr/log and, when
 * -progress is active, as key=value pairs to progress_avio.
 * NOTE(review): the extraction dropped several lines (e.g. buf init at
 * original line 1744, parts of the PSNR/pts bookkeeping around 1812-1824,
 * and the error message body at 1901) — consult the complete ffmpeg.c. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1703 {
1704  AVBPrint buf, buf_script;
1705  OutputStream *ost;
1706  AVFormatContext *oc;
1707  int64_t total_size;
1708  AVCodecContext *enc;
1709  int frame_number, vid, i;
1710  double bitrate;
1711  double speed;
1712  int64_t pts = INT64_MIN + 1;
1713  static int64_t last_time = -1;
1714  static int first_report = 1;
1715  static int qp_histogram[52];
1716  int hours, mins, secs, us;
1717  const char *hours_sign;
1718  int ret;
1719  float t;
1720 
1721  if (!print_stats && !is_last_report && !progress_avio)
1722  return;
1723 
/* Rate-limit intermediate reports to stats_period; the very first report
 * additionally waits until every output file header has been dumped. */
1724  if (!is_last_report) {
1725  if (last_time == -1) {
1726  last_time = cur_time;
1727  }
1728  if (((cur_time - last_time) < stats_period && !first_report) ||
1729  (first_report && nb_output_dumped < nb_output_files))
1730  return;
1731  last_time = cur_time;
1732  }
1733 
1734  t = (cur_time-timer_start) / 1000000.0;
1735 
1736 
1737  oc = output_files[0]->ctx;
1738 
1739  total_size = avio_size(oc->pb);
1740  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1741  total_size = avio_tell(oc->pb);
1742 
1743  vid = 0;
1745  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1746  for (i = 0; i < nb_output_streams; i++) {
1747  float q = -1;
1748  ost = output_streams[i];
1749  enc = ost->enc_ctx;
1750  if (!ost->stream_copy)
1751  q = ost->quality / (float) FF_QP2LAMBDA;
1752 
1753  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1754  av_bprintf(&buf, "q=%2.1f ", q);
1755  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1756  ost->file_index, ost->index, q);
1757  }
1758  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1759  float fps;
1760 
1761  frame_number = ost->frame_number;
1762  fps = t > 1 ? frame_number / t : 0;
1763  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1764  frame_number, fps < 9.95, fps, q);
1765  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1766  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1767  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1768  ost->file_index, ost->index, q);
1769  if (is_last_report)
1770  av_bprintf(&buf, "L");
1771  if (qp_hist) {
1772  int j;
1773  int qp = lrintf(q);
1774  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1775  qp_histogram[qp]++;
1776  for (j = 0; j < 32; j++)
1777  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1778  }
1779 
1780  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1781  int j;
1782  double error, error_sum = 0;
1783  double scale, scale_sum = 0;
1784  double p;
1785  char type[3] = { 'Y','U','V' };
1786  av_bprintf(&buf, "PSNR=");
1787  for (j = 0; j < 3; j++) {
1788  if (is_last_report) {
1789  error = enc->error[j];
1790  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1791  } else {
1792  error = ost->error[j];
1793  scale = enc->width * enc->height * 255.0 * 255.0;
1794  }
/* Chroma planes are subsampled, hence a quarter of the luma scale. */
1795  if (j)
1796  scale /= 4;
1797  error_sum += error;
1798  scale_sum += scale;
1799  p = psnr(error / scale);
1800  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1801  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1802  ost->file_index, ost->index, type[j] | 32, p);
1803  }
1804  p = psnr(error_sum / scale_sum);
1805  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1806  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1807  ost->file_index, ost->index, p);
1808  }
1809  vid = 1;
1810  }
1811  /* compute min output value */
1815  if (copy_ts) {
1816  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1820  }
1821  }
1822 
1823  if (is_last_report)
1825  }
1826 
1827  secs = FFABS(pts) / AV_TIME_BASE;
1828  us = FFABS(pts) % AV_TIME_BASE;
1829  mins = secs / 60;
1830  secs %= 60;
1831  hours = mins / 60;
1832  mins %= 60;
1833  hours_sign = (pts < 0) ? "-" : "";
1834 
1835  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1836  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1837 
1838  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1839  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1840  if (pts == AV_NOPTS_VALUE) {
1841  av_bprintf(&buf, "N/A ");
1842  } else {
1843  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1844  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1845  }
1846 
1847  if (bitrate < 0) {
1848  av_bprintf(&buf, "bitrate=N/A");
1849  av_bprintf(&buf_script, "bitrate=N/A\n");
1850  }else{
1851  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1852  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1853  }
1854 
1855  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1856  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1857  if (pts == AV_NOPTS_VALUE) {
1858  av_bprintf(&buf_script, "out_time_us=N/A\n");
1859  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1860  av_bprintf(&buf_script, "out_time=N/A\n");
1861  } else {
1862  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1863  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1864  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1865  hours_sign, hours, mins, secs, us);
1866  }
1867 
1869  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1870  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1871  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1872 
1873  if (speed < 0) {
1874  av_bprintf(&buf, " speed=N/A");
1875  av_bprintf(&buf_script, "speed=N/A\n");
1876  } else {
1877  av_bprintf(&buf, " speed=%4.3gx", speed);
1878  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1879  }
1880 
1881  if (print_stats || is_last_report) {
1882  const char end = is_last_report ? '\n' : '\r';
1883  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1884  fprintf(stderr, "%s %c", buf.str, end);
1885  } else
1886  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1887 
1888  fflush(stderr);
1889  }
1890  av_bprint_finalize(&buf, NULL);
1891 
1892  if (progress_avio) {
1893  av_bprintf(&buf_script, "progress=%s\n",
1894  is_last_report ? "end" : "continue");
1895  avio_write(progress_avio, buf_script.str,
1896  FFMIN(buf_script.len, buf_script.size - 1));
1898  av_bprint_finalize(&buf_script, NULL);
1899  if (is_last_report) {
1900  if ((ret = avio_closep(&progress_avio)) < 0)
1902  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1903  }
1904  }
1905 
1906  first_report = 0;
1907 
1908  if (is_last_report)
1909  print_final_stats(total_size);
1910 }
1911 
/* Copy stream parameters from an AVCodecParameters into an InputFilter when
 * no decoded frame ever arrived to provide them.
 * NOTE(review): the signature line (original 1912) was dropped by the
 * extraction; presumably
 * ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
 * — confirm against the complete ffmpeg.c. */
1913 {
1914  // We never got any input. Set a fake format, which will
1915  // come from libavformat.
1916  ifilter->format = par->format;
1917  ifilter->sample_rate = par->sample_rate;
1918  ifilter->channels = par->channels;
1919  ifilter->channel_layout = par->channel_layout;
1920  ifilter->width = par->width;
1921  ifilter->height = par->height;
1922  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1923 }
1924 
/* Drain every encoder at end of input: configure never-initialized streams,
 * send NULL frames to each encoder and mux the remaining packets until EOF.
 * NOTE(review): the extraction dropped several lines (e.g. ost/of
 * declarations around original lines 1930-1932, an av_log level at 1942,
 * checks at 1954/1963/1966/1969, and packet setup around 1988-1990) —
 * consult the complete ffmpeg.c before editing. */
static void flush_encoders(void)
1926 {
1927  int i, ret;
1928 
1929  for (i = 0; i < nb_output_streams; i++) {
1931  AVCodecContext *enc = ost->enc_ctx;
1933 
1934  if (!ost->encoding_needed)
1935  continue;
1936 
1937  // Try to enable encoding with no input frames.
1938  // Maybe we should just let encoding fail instead.
1939  if (!ost->initialized) {
1940  FilterGraph *fg = ost->filter->graph;
1941 
1943  "Finishing stream %d:%d without any data written to it.\n",
1944  ost->file_index, ost->st->index);
1945 
1946  if (ost->filter && !fg->graph) {
1947  int x;
/* Seed every unconfigured filter input from its stream's codecpar so the
 * graph can be configured without having seen a frame. */
1948  for (x = 0; x < fg->nb_inputs; x++) {
1949  InputFilter *ifilter = fg->inputs[x];
1950  if (ifilter->format < 0)
1951  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1952  }
1953 
1955  continue;
1956 
1957  ret = configure_filtergraph(fg);
1958  if (ret < 0) {
1959  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1960  exit_program(1);
1961  }
1962 
1964  }
1965 
1967  }
1968 
1970  continue;
1971 
1972  for (;;) {
1973  const char *desc = NULL;
1974  AVPacket *pkt = ost->pkt;
1975  int pkt_size;
1976 
1977  switch (enc->codec_type) {
1978  case AVMEDIA_TYPE_AUDIO:
1979  desc = "audio";
1980  break;
1981  case AVMEDIA_TYPE_VIDEO:
1982  desc = "video";
1983  break;
1984  default:
1985  av_assert0(0);
1986  }
1987 
1989 
/* Keep feeding the flush (NULL) frame while the encoder wants more input. */
1991  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1992  ret = avcodec_send_frame(enc, NULL);
1993  if (ret < 0) {
1994  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1995  desc,
1996  av_err2str(ret));
1997  exit_program(1);
1998  }
1999  }
2000 
2001  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2002  if (ret < 0 && ret != AVERROR_EOF) {
2003  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2004  desc,
2005  av_err2str(ret));
2006  exit_program(1);
2007  }
2008  if (ost->logfile && enc->stats_out) {
2009  fprintf(ost->logfile, "%s", enc->stats_out);
2010  }
2011  if (ret == AVERROR_EOF) {
2012  output_packet(of, pkt, ost, 1);
2013  break;
2014  }
2015  if (ost->finished & MUXER_FINISHED) {
2017  continue;
2018  }
2020  pkt_size = pkt->size;
2021  output_packet(of, pkt, ost, 0);
2023  do_video_stats(ost, pkt_size);
2024  }
2025  }
2026  }
2027 }
2028 
2029 /*
2030  * Check whether a packet from ist should be written into ost at this time
2031  */
/* NOTE(review): the signature line (original 2032) and the 'of' declaration
 * (2034) were dropped by the extraction; presumably
 * check_output_constraints(InputStream *ist, OutputStream *ost) returning
 * int (1 = write, 0 = skip) — confirm against the complete ffmpeg.c. */
2033 {
2035  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2036 
/* Skip if this output stream is not fed by this input stream. */
2037  if (ost->source_index != ist_index)
2038  return 0;
2039 
2040  if (ost->finished)
2041  return 0;
2042 
/* Skip packets before the requested output start time (-ss on output). */
2043  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2044  return 0;
2045 
2046  return 1;
2047 }
2048 
/* Stream-copy path: rescale a demuxed packet's timestamps into the muxer
 * timebase and pass it to output_packet() without re-encoding.
 * NOTE(review): the extraction dropped the signature (original line 2049,
 * presumably do_streamcopy(InputStream *ist, OutputStream *ost,
 * const AVPacket *pkt)) and several statements (2051, 2065, 2080, 2089,
 * 2095, 2105) — consult the complete ffmpeg.c before editing. */
2050 {
2052  InputFile *f = input_files [ist->file_index];
2053  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2054  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2055  AVPacket *opkt = ost->pkt;
2056 
2057  av_packet_unref(opkt);
2058  // EOF: flush output bitstream filters.
2059  if (!pkt) {
2060  output_packet(of, opkt, ost, 1);
2061  return;
2062  }
2063 
/* Drop leading non-keyframe packets (condition partially missing here). */
2064  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2066  return;
2067 
2068  if (!ost->frame_number && !ost->copy_prior_start) {
2069  int64_t comp_start = start_time;
2070  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2071  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2072  if (pkt->pts == AV_NOPTS_VALUE ?
2073  ist->pts < comp_start :
2074  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2075  return;
2076  }
2077 
/* Stop copying once the output's -t/-to recording window is exceeded. */
2078  if (of->recording_time != INT64_MAX &&
2079  ist->pts >= of->recording_time + start_time) {
2081  return;
2082  }
2083 
2084  if (f->recording_time != INT64_MAX) {
2085  start_time = f->ctx->start_time;
2086  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2087  start_time += f->start_time;
2088  if (ist->pts >= f->recording_time + start_time) {
2090  return;
2091  }
2092  }
2093 
2094  /* force the input stream PTS */
2096  ost->sync_opts++;
2097 
2098  if (av_packet_ref(opkt, pkt) < 0)
2099  exit_program(1);
2100 
2101  if (pkt->pts != AV_NOPTS_VALUE)
2102  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2103 
2104  if (pkt->dts == AV_NOPTS_VALUE) {
2106  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
/* For audio, derive dts from the sample count so rounding error does not
 * accumulate across packets (av_rescale_delta keeps a running remainder). */
2107  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2108  if(!duration)
2109  duration = ist->dec_ctx->frame_size;
2110  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2111  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2112  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2113  /* dts will be set immediately afterwards to what pts is now */
2114  opkt->pts = opkt->dts - ost_tb_start_time;
2115  } else
2116  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2117  opkt->dts -= ost_tb_start_time;
2118 
2119  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2120 
2121  output_packet(of, opkt, ost, 0);
2122 }
2123 
/* If the decoder reported no channel layout, pick the default layout for the
 * channel count (up to ist->guess_layout_max channels) and warn about the
 * guess. Returns 1 when a layout is present/guessed, 0 when it cannot be.
 * NOTE(review): the signature line (original 2124) was dropped; presumably
 * guess_input_channel_layout(InputStream *ist) — confirm against the
 * complete ffmpeg.c. */
2125 {
2126  AVCodecContext *dec = ist->dec_ctx;
2127 
2128  if (!dec->channel_layout) {
2129  char layout_name[256];
2130 
2131  if (dec->channels > ist->guess_layout_max)
2132  return 0;
2133  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2134  if (!dec->channel_layout)
2135  return 0;
2136  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2137  dec->channels, dec->channel_layout);
2138  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2139  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2140  }
2141  return 1;
2142 }
2143 
/* Track decode success/failure statistics and abort on error when
 * -xerror (exit_on_error) is set; also flags corrupt decoded frames.
 * NOTE(review): the signature line (original 2144) and the av_log level for
 * the corrupt-frame message (2154) were dropped by the extraction;
 * presumably check_decode_result(InputStream *ist, int *got_output, int ret)
 * — confirm against the complete ffmpeg.c. */
2145 {
2146  if (*got_output || ret<0)
2147  decode_error_stat[ret<0] ++;
2148 
2149  if (ret < 0 && exit_on_error)
2150  exit_program(1);
2151 
2152  if (*got_output && ist) {
2153  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2155  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2156  if (exit_on_error)
2157  exit_program(1);
2158  }
2159  }
2160 }
2161 
2162 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every audio/video input of the graph has a known format,
 * 0 otherwise. NOTE(review): the signature line (original 2163) was dropped;
 * presumably ifilter_has_all_input_formats(FilterGraph *fg) — confirm
 * against the complete ffmpeg.c. */
2164 {
2165  int i;
2166  for (i = 0; i < fg->nb_inputs; i++) {
2167  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2168  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2169  return 0;
2170  }
2171  return 1;
2172 }
2173 
/* Feed one decoded frame into an input filter, reconfiguring or buffering as
 * needed when the frame's parameters differ from the graph's.
 * NOTE(review): the extraction dropped the signature (original line 2174,
 * presumably ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)) and
 * several statements (2202, 2211 the tmp = av_frame_clone(frame) allocation
 * — inferred from the surrounding av_frame_free(&tmp), 2214, and the
 * buffersrc add call at 2241). Consult the complete ffmpeg.c before editing. */
2175 {
2176  FilterGraph *fg = ifilter->graph;
2177  int need_reinit, ret, i;
2178 
2179  /* determine if the parameters for this input changed */
2180  need_reinit = ifilter->format != frame->format;
2181 
2182  switch (ifilter->ist->st->codecpar->codec_type) {
2183  case AVMEDIA_TYPE_AUDIO:
2184  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2185  ifilter->channels != frame->channels ||
2186  ifilter->channel_layout != frame->channel_layout;
2187  break;
2188  case AVMEDIA_TYPE_VIDEO:
2189  need_reinit |= ifilter->width != frame->width ||
2190  ifilter->height != frame->height;
2191  break;
2192  }
2193 
2194  if (!ifilter->ist->reinit_filters && fg->graph)
2195  need_reinit = 0;
2196 
/* A change in hw frames context (presence or identity) forces a reinit. */
2197  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2198  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2199  need_reinit = 1;
2200 
2201  if (need_reinit) {
2203  if (ret < 0)
2204  return ret;
2205  }
2206 
2207  /* (re)init the graph if possible, otherwise buffer the frame and return */
2208  if (need_reinit || !fg->graph) {
2209  for (i = 0; i < fg->nb_inputs; i++) {
2210  if (!ifilter_has_all_input_formats(fg)) {
2212  if (!tmp)
2213  return AVERROR(ENOMEM);
2215 
/* Grow the FIFO when full before queueing the cloned frame. */
2216  if (!av_fifo_space(ifilter->frame_queue)) {
2217  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2218  if (ret < 0) {
2219  av_frame_free(&tmp);
2220  return ret;
2221  }
2222  }
2223  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2224  return 0;
2225  }
2226  }
2227 
2228  ret = reap_filters(1);
2229  if (ret < 0 && ret != AVERROR_EOF) {
2230  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2231  return ret;
2232  }
2233 
2234  ret = configure_filtergraph(fg);
2235  if (ret < 0) {
2236  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2237  return ret;
2238  }
2239  }
2240 
2242  if (ret < 0) {
2243  if (ret != AVERROR_EOF)
2244  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2245  return ret;
2246  }
2247 
2248  return 0;
2249 }
2250 
/* Signal EOF on an input filter. If the filtergraph is already configured,
 * forward the EOF to the buffer source (the call at original line 2258 was
 * dropped by the extraction — NOTE(review): confirm against the complete
 * ffmpeg.c); otherwise fall back to codecpar-derived parameters so the graph
 * can still be configured, failing if the format remains unknown.
 * @return 0 on success, a negative AVERROR code on failure */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2252 {
2253  int ret;
2254 
2255  ifilter->eof = 1;
2256 
2257  if (ifilter->filter) {
2259  if (ret < 0)
2260  return ret;
2261  } else {
2262  // the filtergraph was never configured
2263  if (ifilter->format < 0)
2264  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2265  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2266  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2267  return AVERROR_INVALIDDATA;
2268  }
2269  }
2270 
2271  return 0;
2272 }
2273 
2274 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2275 // There is the following difference: if you got a frame, you must call
2276 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2277 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2278 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2279 {
2280  int ret;
2281 
2282  *got_frame = 0;
2283 
2284  if (pkt) {
2285  ret = avcodec_send_packet(avctx, pkt);
2286  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2287  // decoded frames with avcodec_receive_frame() until done.
2288  if (ret < 0 && ret != AVERROR_EOF)
2289  return ret;
2290  }
2291 
2292  ret = avcodec_receive_frame(avctx, frame);
2293  if (ret < 0 && ret != AVERROR(EAGAIN))
2294  return ret;
2295  if (ret >= 0)
2296  *got_frame = 1;
2297 
2298  return 0;
2299 }
2300 
/* Distribute one decoded frame to every filter fed by this input stream,
 * cloning into ist->filter_frame for all but the last destination.
 * NOTE(review): the extraction dropped the signature (original line 2301,
 * presumably send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)),
 * the av_frame_ref at 2310, and the av_log level at 2319 — confirm against
 * the complete ffmpeg.c. */
2302 {
2303  int i, ret;
2304  AVFrame *f;
2305 
2306  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2307  for (i = 0; i < ist->nb_filters; i++) {
2308  if (i < ist->nb_filters - 1) {
2309  f = ist->filter_frame;
2311  if (ret < 0)
2312  break;
2313  } else
2314  f = decoded_frame;
2315  ret = ifilter_send_frame(ist->filters[i], f);
2316  if (ret == AVERROR_EOF)
2317  ret = 0; /* ignore */
2318  if (ret < 0) {
2320  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2321  break;
2322  }
2323  }
2324  return ret;
2325 }
2326 
/* Decode one audio packet, fix up the frame's pts/timebase, and hand the
 * frame to the filter network (the send_frame_to_filters call at original
 * line 2382 was dropped by the extraction).
 * NOTE(review): the first line of the signature (original 2327, presumably
 * "static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,")
 * and several statements (2330, 2341, 2349, 2353, 2377, 2385) are missing —
 * consult the complete ffmpeg.c before editing. */
2328  int *decode_failed)
2329 {
2331  AVCodecContext *avctx = ist->dec_ctx;
2332  int ret, err = 0;
2333  AVRational decoded_frame_tb;
2334 
2335  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2336  return AVERROR(ENOMEM);
2337  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2338  return AVERROR(ENOMEM);
2339  decoded_frame = ist->decoded_frame;
2340 
2342  ret = decode(avctx, decoded_frame, got_output, pkt);
2343  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2344  if (ret < 0)
2345  *decode_failed = 1;
2346 
2347  if (ret >= 0 && avctx->sample_rate <= 0) {
2348  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2350  }
2351 
2352  if (ret != AVERROR_EOF)
2354 
2355  if (!*got_output || ret < 0)
2356  return ret;
2357 
2358  ist->samples_decoded += decoded_frame->nb_samples;
2359  ist->frames_decoded++;
2360 
2361  /* increment next_dts to use for the case where the input stream does not
2362  have timestamps or there are multiple frames in the packet */
2363  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2364  avctx->sample_rate;
2365  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2366  avctx->sample_rate;
2367 
/* Choose the timebase the frame's pts is expressed in: frame pts and packet
 * pts are in the stream timebase, the ist->dts fallback is in AV_TIME_BASE_Q. */
2368  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2369  decoded_frame_tb = ist->st->time_base;
2370  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2371  decoded_frame->pts = pkt->pts;
2372  decoded_frame_tb = ist->st->time_base;
2373  }else {
2374  decoded_frame->pts = ist->dts;
2375  decoded_frame_tb = AV_TIME_BASE_Q;
2376  }
2378  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2379  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2380  (AVRational){1, avctx->sample_rate});
2381  ist->nb_samples = decoded_frame->nb_samples;
2383 
2384  av_frame_unref(ist->filter_frame);
2386  return err < 0 ? err : ret;
2387 }
2388 
/*
 * Decode one video packet: feeds the decoder, retrieves hw-accel data
 * if needed, derives a best-effort timestamp (falling back to buffered
 * DTS values while draining at EOF) and updates ist->pts/next_pts.
 *
 * NOTE(review): doxygen-extracted view — some lines are missing
 * (orig. 2392 decoded_frame declaration, 2424, 2446, 2453-2455 parts of
 * the mismatch log call, 2505, 2512, 2516); restore from the real
 * ffmpeg.c before modifying logic.
 */
2389 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2390  int *decode_failed)
2391 {
2393  int i, ret = 0, err = 0;
2394  int64_t best_effort_timestamp;
2395  int64_t dts = AV_NOPTS_VALUE;
2396 
2397  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2398  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2399  // skip the packet.
2400  if (!eof && pkt && pkt->size == 0)
2401  return 0;
2402 
2403  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2404  return AVERROR(ENOMEM);
2405  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2406  return AVERROR(ENOMEM);
2407  decoded_frame = ist->decoded_frame;
2408  if (ist->dts != AV_NOPTS_VALUE)
2409  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2410  if (pkt) {
2411  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2412  }
2413 
2414  // The old code used to set dts on the drain packet, which does not work
2415  // with the new API anymore.
2416  if (eof) {
2417  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2418  if (!new)
2419  return AVERROR(ENOMEM);
2420  ist->dts_buffer = new;
2421  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2422  }
2423 
2425  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2426  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2427  if (ret < 0)
2428  *decode_failed = 1;
2429 
2430  // The following line may be required in some cases where there is no parser
2431  // or the parser does not has_b_frames correctly
2432  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2433  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2434  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2435  } else
2436  av_log(ist->dec_ctx, AV_LOG_WARNING,
2437  "video_delay is larger in decoder than demuxer %d > %d.\n"
2438  "If you want to help, upload a sample "
2439  "of this file to https://streams.videolan.org/upload/ "
2440  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2441  ist->dec_ctx->has_b_frames,
2442  ist->st->codecpar->video_delay);
2443  }
2444 
2445  if (ret != AVERROR_EOF)
2447 
2448  if (*got_output && ret >= 0) {
2449  if (ist->dec_ctx->width != decoded_frame->width ||
2450  ist->dec_ctx->height != decoded_frame->height ||
2451  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2452  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2456  ist->dec_ctx->width,
2457  ist->dec_ctx->height,
2458  ist->dec_ctx->pix_fmt);
2459  }
2460  }
2461 
2462  if (!*got_output || ret < 0)
2463  return ret;
2464 
2465  if(ist->top_field_first>=0)
2466  decoded_frame->top_field_first = ist->top_field_first;
2467 
2468  ist->frames_decoded++;
2469 
2470  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2471  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2472  if (err < 0)
2473  goto fail;
2474  }
2475  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2476 
2477  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2478  *duration_pts = decoded_frame->pkt_duration;
2479 
/* Forced CFR input: synthesize monotonically increasing timestamps. */
2480  if (ist->framerate.num)
2481  best_effort_timestamp = ist->cfr_next_pts++;
2482 
2483  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2484  best_effort_timestamp = ist->dts_buffer[0];
2485 
2486  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2487  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2488  ist->nb_dts_buffer--;
2489  }
2490 
2491  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2492  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2493 
2494  if (ts != AV_NOPTS_VALUE)
2495  ist->next_pts = ist->pts = ts;
2496  }
2497 
2498  if (debug_ts) {
2499  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2500  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2501  ist->st->index, av_ts2str(decoded_frame->pts),
2502  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2503  best_effort_timestamp,
2504  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2506  ist->st->time_base.num, ist->st->time_base.den);
2507  }
2508 
2509  if (ist->st->sample_aspect_ratio.num)
2510  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2511 
2513 
2514 fail:
2515  av_frame_unref(ist->filter_frame);
2517  return err < 0 ? err : ret;
2518 }
2519 
/*
 * Decode one subtitle packet, optionally fix up subtitle durations
 * (-fix_sub_duration), feed sub2video filtering or queue the subtitle,
 * and dispatch it to all matching subtitle output streams.
 *
 * NOTE(review): doxygen-extracted view — the signature head (orig. 2520)
 * and lines 2523, 2528, 2533, 2582, 2586, 2590, 2595 are missing;
 * restore from the real ffmpeg.c before modifying logic.
 */
2521  int *decode_failed)
2522 {
2524  int free_sub = 1;
2525  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2526  &subtitle, got_output, pkt);
2527 
2529 
2530  if (ret < 0 || !*got_output) {
2531  *decode_failed = 1;
2532  if (!pkt->size)
2534  return ret;
2535  }
2536 
2537  if (ist->fix_sub_duration) {
2538  int end = 1;
2539  if (ist->prev_sub.got_output) {
/* Clamp the previous subtitle's display time to end when this one starts. */
2540  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2541  1000, AV_TIME_BASE);
2542  if (end < ist->prev_sub.subtitle.end_display_time) {
2543  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2544  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2545  ist->prev_sub.subtitle.end_display_time, end,
2546  end <= 0 ? ", dropping it" : "");
2547  ist->prev_sub.subtitle.end_display_time = end;
2548  }
2549  }
2550  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2551  FFSWAP(int, ret, ist->prev_sub.ret);
2552  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2553  if (end <= 0)
2554  goto out;
2555  }
2556 
2557  if (!*got_output)
2558  return ret;
2559 
2560  if (ist->sub2video.frame) {
2561  sub2video_update(ist, INT64_MIN, &subtitle);
2562  } else if (ist->nb_filters) {
2563  if (!ist->sub2video.sub_queue)
2564  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2565  if (!ist->sub2video.sub_queue)
2566  exit_program(1);
2567  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2568  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2569  if (ret < 0)
2570  exit_program(1);
2571  }
2572  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2573  free_sub = 0;
2574  }
2575 
2576  if (!subtitle.num_rects)
2577  goto out;
2578 
2579  ist->frames_decoded++;
2580 
2581  for (i = 0; i < nb_output_streams; i++) {
2583 
2584  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2585  exit_program(1);
2587  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2588  continue;
2589 
2591  }
2592 
2593 out:
2594  if (free_sub)
2596  return ret;
2597 }
2598 
/*
 * Signal EOF (with the stream's final pts, rescaled to stream time
 * base) to all filtergraph inputs fed by this input stream.
 *
 * NOTE(review): doxygen-extracted view — the signature line (orig. 2599)
 * and the rounding-flags continuation of the av_rescale_q_rnd() call
 * (orig. 2604) are missing; restore from the real ffmpeg.c before
 * modifying logic.
 */
2600 {
2601  int i, ret;
2602  /* TODO keep pts also in stream time base to avoid converting back */
2603  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2605 
2606  for (i = 0; i < ist->nb_filters; i++) {
2607  ret = ifilter_send_eof(ist->filters[i], pts);
2608  if (ret < 0)
2609  return ret;
2610  }
2611  return 0;
2612 }
2613 
/*
 * Core per-packet dispatcher for one input stream: initializes the
 * stream clocks on the first packet, runs the decode loop for streams
 * that need decoding (audio/video/subtitle), sends filter-EOF when the
 * decoder is fully drained, and maintains dts/pts prediction for
 * stream-copied outputs.  Returns 0 when decoder EOF was reached,
 * non-zero otherwise.
 *
 * NOTE(review): doxygen-extracted view — the OutputStream declaration
 * (orig. 2794) and the check_output_constraints() condition head
 * (orig. 2798) in the final stream-copy loop are missing; restore from
 * the real ffmpeg.c before modifying logic.
 */
2614 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2615 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2616 {
2617  int ret = 0, i;
2618  int repeating = 0;
2619  int eof_reached = 0;
2620 
2621  AVPacket *avpkt;
2622 
2623  if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2624  return AVERROR(ENOMEM);
2625  avpkt = ist->pkt;
2626 
2627  if (!ist->saw_first_ts) {
2628  ist->first_dts =
2629  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2630  ist->pts = 0;
2631  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2632  ist->first_dts =
2633  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2634  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2635  }
2636  ist->saw_first_ts = 1;
2637  }
2638 
2639  if (ist->next_dts == AV_NOPTS_VALUE)
2640  ist->next_dts = ist->dts;
2641  if (ist->next_pts == AV_NOPTS_VALUE)
2642  ist->next_pts = ist->pts;
2643 
2644  if (pkt) {
2645  av_packet_unref(avpkt);
2646  ret = av_packet_ref(avpkt, pkt);
2647  if (ret < 0)
2648  return ret;
2649  }
2650 
2651  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2652  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2653  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2654  ist->next_pts = ist->pts = ist->dts;
2655  }
2656 
2657  // while we have more to decode or while the decoder did output something on EOF
2658  while (ist->decoding_needed) {
2659  int64_t duration_dts = 0;
2660  int64_t duration_pts = 0;
2661  int got_output = 0;
2662  int decode_failed = 0;
2663 
2664  ist->pts = ist->next_pts;
2665  ist->dts = ist->next_dts;
2666 
2667  switch (ist->dec_ctx->codec_type) {
2668  case AVMEDIA_TYPE_AUDIO:
2669  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2670  &decode_failed);
2671  av_packet_unref(avpkt);
2672  break;
2673  case AVMEDIA_TYPE_VIDEO:
2674  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2675  &decode_failed);
2676  if (!repeating || !pkt || got_output) {
2677  if (pkt && pkt->duration) {
2678  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2679  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2680  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2681  duration_dts = ((int64_t)AV_TIME_BASE *
2682  ist->dec_ctx->framerate.den * ticks) /
2683  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2684  }
2685 
2686  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2687  ist->next_dts += duration_dts;
2688  }else
2689  ist->next_dts = AV_NOPTS_VALUE;
2690  }
2691 
2692  if (got_output) {
2693  if (duration_pts > 0) {
2694  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2695  } else {
2696  ist->next_pts += duration_dts;
2697  }
2698  }
2699  av_packet_unref(avpkt);
2700  break;
2701  case AVMEDIA_TYPE_SUBTITLE:
2702  if (repeating)
2703  break;
2704  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2705  if (!pkt && ret >= 0)
2706  ret = AVERROR_EOF;
2707  av_packet_unref(avpkt);
2708  break;
2709  default:
2710  return -1;
2711  }
2712 
2713  if (ret == AVERROR_EOF) {
2714  eof_reached = 1;
2715  break;
2716  }
2717 
2718  if (ret < 0) {
2719  if (decode_failed) {
2720  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2721  ist->file_index, ist->st->index, av_err2str(ret));
2722  } else {
2723  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2724  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2725  }
2726  if (!decode_failed || exit_on_error)
2727  exit_program(1);
2728  break;
2729  }
2730 
2731  if (got_output)
2732  ist->got_output = 1;
2733 
2734  if (!got_output)
2735  break;
2736 
2737  // During draining, we might get multiple output frames in this loop.
2738  // ffmpeg.c does not drain the filter chain on configuration changes,
2739  // which means if we send multiple frames at once to the filters, and
2740  // one of those frames changes configuration, the buffered frames will
2741  // be lost. This can upset certain FATE tests.
2742  // Decode only 1 frame per call on EOF to appease these FATE tests.
2743  // The ideal solution would be to rewrite decoding to use the new
2744  // decoding API in a better way.
2745  if (!pkt)
2746  break;
2747 
2748  repeating = 1;
2749  }
2750 
2751  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2752  /* except when looping we need to flush but not to send an EOF */
2753  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2754  int ret = send_filter_eof(ist);
2755  if (ret < 0) {
2756  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2757  exit_program(1);
2758  }
2759  }
2760 
2761  /* handle stream copy */
2762  if (!ist->decoding_needed && pkt) {
2763  ist->dts = ist->next_dts;
2764  switch (ist->dec_ctx->codec_type) {
2765  case AVMEDIA_TYPE_AUDIO:
2766  av_assert1(pkt->duration >= 0);
2767  if (ist->dec_ctx->sample_rate) {
2768  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2769  ist->dec_ctx->sample_rate;
2770  } else {
2771  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2772  }
2773  break;
2774  case AVMEDIA_TYPE_VIDEO:
2775  if (ist->framerate.num) {
2776  // TODO: Remove work-around for c99-to-c89 issue 7
2777  AVRational time_base_q = AV_TIME_BASE_Q;
2778  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2779  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2780  } else if (pkt->duration) {
2781  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2782  } else if(ist->dec_ctx->framerate.num != 0) {
2783  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2784  ist->next_dts += ((int64_t)AV_TIME_BASE *
2785  ist->dec_ctx->framerate.den * ticks) /
2786  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2787  }
2788  break;
2789  }
2790  ist->pts = ist->dts;
2791  ist->next_pts = ist->next_dts;
2792  }
2793  for (i = 0; i < nb_output_streams; i++) {
2795 
2796  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2797  exit_program(1);
2799  continue;
2800 
2801  do_streamcopy(ist, ost, pkt);
2802  }
2803 
2804  return !eof_reached;
2805 }
2806 
/*
 * Generate and emit an SDP description covering all RTP output files,
 * either to stdout or to -sdp_file.  Does nothing until every output
 * file has written its header.
 *
 * NOTE(review): doxygen-extracted view — one line (orig. 2844) inside
 * the sdp-file branch is missing; restore from the real ffmpeg.c before
 * modifying logic.
 */
2807 static void print_sdp(void)
2808 {
2809  char sdp[16384];
2810  int i;
2811  int j;
2812  AVIOContext *sdp_pb;
2813  AVFormatContext **avc;
2814 
2815  for (i = 0; i < nb_output_files; i++) {
2816  if (!output_files[i]->header_written)
2817  return;
2818  }
2819 
2820  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2821  if (!avc)
2822  exit_program(1);
2823  for (i = 0, j = 0; i < nb_output_files; i++) {
2824  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2825  avc[j] = output_files[i]->ctx;
2826  j++;
2827  }
2828  }
2829 
2830  if (!j)
2831  goto fail;
2832 
2833  av_sdp_create(avc, j, sdp, sizeof(sdp));
2834 
2835  if (!sdp_filename) {
2836  printf("SDP:\n%s\n", sdp);
2837  fflush(stdout);
2838  } else {
2839  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2840  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2841  } else {
2842  avio_print(sdp_pb, sdp);
2843  avio_closep(&sdp_pb);
2845  }
2846  }
2847 
2848 fail:
2849  av_freep(&avc);
2850 }
2851 
/*
 * AVCodecContext.get_format callback: walks the decoder's proposed
 * pixel formats and picks the first one for which the requested
 * hwaccel can be initialized; falls through to the first software
 * format otherwise.
 *
 * NOTE(review): doxygen-extracted view — the signature line (orig. 2852),
 * the pix_fmt descriptor lookup (orig. 2859), the hw-config method mask
 * (orig. 2873), the hwaccel init call (orig. 2885) and two av_log heads
 * (orig. 2888, 2917) are missing; restore from the real ffmpeg.c before
 * modifying logic.
 */
2853 {
2854  InputStream *ist = s->opaque;
2855  const enum AVPixelFormat *p;
2856  int ret;
2857 
2858  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2860  const AVCodecHWConfig *config = NULL;
2861  int i;
2862 
2863  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2864  break;
2865 
2866  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2867  ist->hwaccel_id == HWACCEL_AUTO) {
2868  for (i = 0;; i++) {
2869  config = avcodec_get_hw_config(s->codec, i);
2870  if (!config)
2871  break;
2872  if (!(config->methods &
2874  continue;
2875  if (config->pix_fmt == *p)
2876  break;
2877  }
2878  }
2879  if (config) {
2880  if (config->device_type != ist->hwaccel_device_type) {
2881  // Different hwaccel offered, ignore.
2882  continue;
2883  }
2884 
2886  if (ret < 0) {
2887  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2889  "%s hwaccel requested for input stream #%d:%d, "
2890  "but cannot be initialized.\n",
2891  av_hwdevice_get_type_name(config->device_type),
2892  ist->file_index, ist->st->index);
2893  return AV_PIX_FMT_NONE;
2894  }
2895  continue;
2896  }
2897  } else {
2898  const HWAccel *hwaccel = NULL;
2899  int i;
2900  for (i = 0; hwaccels[i].name; i++) {
2901  if (hwaccels[i].pix_fmt == *p) {
2902  hwaccel = &hwaccels[i];
2903  break;
2904  }
2905  }
2906  if (!hwaccel) {
2907  // No hwaccel supporting this pixfmt.
2908  continue;
2909  }
2910  if (hwaccel->id != ist->hwaccel_id) {
2911  // Does not match requested hwaccel.
2912  continue;
2913  }
2914 
2915  ret = hwaccel->init(s);
2916  if (ret < 0) {
2918  "%s hwaccel requested for input stream #%d:%d, "
2919  "but cannot be initialized.\n", hwaccel->name,
2920  ist->file_index, ist->st->index);
2921  return AV_PIX_FMT_NONE;
2922  }
2923  }
2924 
2925  if (ist->hw_frames_ctx) {
2926  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2927  if (!s->hw_frames_ctx)
2928  return AV_PIX_FMT_NONE;
2929  }
2930 
2931  ist->hwaccel_pix_fmt = *p;
2932  break;
2933  }
2934 
2935  return *p;
2936 }
2937 
/*
 * AVCodecContext.get_buffer2 callback: delegates to the active
 * hwaccel's buffer allocator for hw frames, otherwise falls through
 * (the default-allocator return on orig. line 2945 is missing from
 * this doxygen-extracted view, as is the signature on orig. 2938).
 */
2939 {
2940  InputStream *ist = s->opaque;
2941 
2942  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2943  return ist->hwaccel_get_buffer(s, frame, flags);
2944 
2946 }
2947 
/*
 * Open the decoder for one input stream (when decoding is needed):
 * installs the get_format/get_buffer2 callbacks, applies decoder
 * options (threads, subtitle handling, packet timebase) and calls
 * avcodec_open2().  On failure, writes a message into `error`.
 *
 * NOTE(review): doxygen-extracted view — the deprecation-warning
 * guards (orig. 2965/2967) and the hw-device setup call (orig. 2989)
 * are missing; restore from the real ffmpeg.c before modifying logic.
 */
2948 static int init_input_stream(int ist_index, char *error, int error_len)
2949 {
2950  int ret;
2951  InputStream *ist = input_streams[ist_index];
2952 
2953  if (ist->decoding_needed) {
2954  const AVCodec *codec = ist->dec;
2955  if (!codec) {
2956  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2957  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2958  return AVERROR(EINVAL);
2959  }
2960 
2961  ist->dec_ctx->opaque = ist;
2962  ist->dec_ctx->get_format = get_format;
2963  ist->dec_ctx->get_buffer2 = get_buffer;
2964 #if LIBAVCODEC_VERSION_MAJOR < 60
2966  ist->dec_ctx->thread_safe_callbacks = 1;
2968 #endif
2969 
2970  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2971  (ist->decoding_needed & DECODING_FOR_OST)) {
2972  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2973  if (ist->decoding_needed & DECODING_FOR_FILTER)
2974  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2975  }
2976 
2977  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2978 
2979  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2980  * audio, and video decoders such as cuvid or mediacodec */
2981  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2982 
2983  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2984  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2985  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2986  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2987  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2988 
2990  if (ret < 0) {
2991  snprintf(error, error_len, "Device setup failed for "
2992  "decoder on input stream #%d:%d : %s",
2993  ist->file_index, ist->st->index, av_err2str(ret));
2994  return ret;
2995  }
2996 
2997  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2998  if (ret == AVERROR_EXPERIMENTAL)
2999  abort_codec_experimental(codec, 0);
3000 
3001  snprintf(error, error_len,
3002  "Error while opening decoder for input stream "
3003  "#%d:%d : %s",
3004  ist->file_index, ist->st->index, av_err2str(ret));
3005  return ret;
3006  }
3007  assert_avoptions(ist->decoder_opts);
3008  }
3009 
3010  ist->next_pts = AV_NOPTS_VALUE;
3011  ist->next_dts = AV_NOPTS_VALUE;
3012 
3013  return 0;
3014 }
3015 
/*
 * Return the InputStream feeding this output stream, or NULL when the
 * output has no direct input source (source_index < 0).
 * NOTE(review): the signature line (orig. 3016) is missing from this
 * doxygen-extracted view.
 */
3017 {
3018  if (ost->source_index >= 0)
3019  return input_streams[ost->source_index];
3020  return NULL;
3021 }
3022 
/*
 * qsort() comparator for int64_t values.
 * Returns -1, 0 or 1 as *a is less than, equal to, or greater than *b
 * (sign-of-difference, computed without risking integer overflow).
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
3027 
/*
 * Once every output stream of this file is initialized, write the
 * muxer header, optionally print the SDP, and flush packets queued
 * while the muxer was not yet open.
 *
 * NOTE(review): doxygen-extracted view — the signature (orig. 3029),
 * per-stream OutputStream declarations (orig. 3034, 3060), av_log heads
 * (orig. 3043, 3046), the timebase-improvement call (orig. 3064) and
 * the fifo read of the queued packet (orig. 3068-3069) are missing;
 * restore from the real ffmpeg.c before modifying logic.
 */
3028 /* open the muxer when all the streams are initialized */
3030 {
3031  int ret, i;
3032 
3033  for (i = 0; i < of->ctx->nb_streams; i++) {
3035  if (!ost->initialized)
3036  return 0;
3037  }
3038 
3039  of->ctx->interrupt_callback = int_cb;
3040 
3041  ret = avformat_write_header(of->ctx, &of->opts);
3042  if (ret < 0) {
3044  "Could not write header for output file #%d "
3045  "(incorrect codec parameters ?): %s\n",
3047  return ret;
3048  }
3049  //assert_avoptions(of->opts);
3050  of->header_written = 1;
3051 
3052  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3053  nb_output_dumped++;
3054 
3055  if (sdp_filename || want_sdp)
3056  print_sdp();
3057 
3058  /* flush the muxing queues */
3059  for (i = 0; i < of->ctx->nb_streams; i++) {
3061 
3062  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3063  if (!av_fifo_size(ost->muxing_queue))
3065 
3066  while (av_fifo_size(ost->muxing_queue)) {
3067  AVPacket *pkt;
3070  write_packet(of, pkt, ost, 1);
3071  av_packet_free(&pkt);
3072  }
3073  }
3074 
3075  return 0;
3076 }
3077 
/*
 * Initialize the output stream's bitstream filter (if any): copy the
 * stream parameters in, init the BSF, and copy the (possibly changed)
 * parameters and timebase back out to the stream.
 *
 * NOTE(review): doxygen-extracted view — the signature (orig. 3078) and
 * the BSF context declaration (orig. 3080) are missing; restore from
 * the real ffmpeg.c before modifying logic.
 */
3079 {
3081  int ret;
3082 
3083  if (!ctx)
3084  return 0;
3085 
3086  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3087  if (ret < 0)
3088  return ret;
3089 
3090  ctx->time_base_in = ost->st->time_base;
3091 
3092  ret = av_bsf_init(ctx);
3093  if (ret < 0) {
3094  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3095  ctx->filter->name);
3096  return ret;
3097  }
3098 
3099  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3100  if (ret < 0)
3101  return ret;
3102  ost->st->time_base = ctx->time_base_out;
3103 
3104  return 0;
3105 }
3106 
/*
 * Set up an output stream in stream-copy mode: propagate codec
 * parameters, codec tag, frame rates, timebase, duration, disposition,
 * side data and aspect ratio from the input stream without re-encoding.
 *
 * NOTE(review): doxygen-extracted view — the signature (orig. 3108),
 * several declarations/calls (orig. 3109-3110, 3121, 3123, 3128, 3130,
 * 3153, 3157, 3163-3166, 3189, 3192, 3209) are missing; restore from
 * the real ffmpeg.c before modifying logic.
 */
3108 {
3111  AVCodecParameters *par_dst = ost->st->codecpar;
3112  AVCodecParameters *par_src = ost->ref_par;
3113  AVRational sar;
3114  int i, ret;
3115  uint32_t codec_tag = par_dst->codec_tag;
3116 
3117  av_assert0(ist && !ost->filter);
3118 
3119  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3120  if (ret >= 0)
3122  if (ret < 0) {
3124  "Error setting up codec context options.\n");
3125  return ret;
3126  }
3127 
3129  if (ret < 0) {
3131  "Error getting reference codec parameters.\n");
3132  return ret;
3133  }
3134 
/* Keep the source codec tag only when the output container accepts it. */
3135  if (!codec_tag) {
3136  unsigned int codec_tag_tmp;
3137  if (!of->ctx->oformat->codec_tag ||
3138  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3139  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3140  codec_tag = par_src->codec_tag;
3141  }
3142 
3143  ret = avcodec_parameters_copy(par_dst, par_src);
3144  if (ret < 0)
3145  return ret;
3146 
3147  par_dst->codec_tag = codec_tag;
3148 
3149  if (!ost->frame_rate.num)
3150  ost->frame_rate = ist->framerate;
3151 
3152  if (ost->frame_rate.num)
3154  else
3155  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3156 
3158  if (ret < 0)
3159  return ret;
3160 
3161  // copy timebase while removing common factors
3162  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3163  if (ost->frame_rate.num)
3165  else
3167  }
3168 
3169  // copy estimated duration as a hint to the muxer
3170  if (ost->st->duration <= 0 && ist->st->duration > 0)
3171  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3172 
3173  // copy disposition
3174  ost->st->disposition = ist->st->disposition;
3175 
3176  if (ist->st->nb_side_data) {
3177  for (i = 0; i < ist->st->nb_side_data; i++) {
3178  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3179  uint8_t *dst_data;
3180 
3181  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3182  if (!dst_data)
3183  return AVERROR(ENOMEM);
3184  memcpy(dst_data, sd_src->data, sd_src->size);
3185  }
3186  }
3187 
3188  if (ost->rotate_overridden) {
3190  sizeof(int32_t) * 9);
3191  if (sd)
3193  }
3194 
3195  switch (par_dst->codec_type) {
3196  case AVMEDIA_TYPE_AUDIO:
3197  if (audio_volume != 256) {
3198  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3199  exit_program(1);
3200  }
3201  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3202  par_dst->block_align= 0;
3203  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3204  par_dst->block_align= 0;
3205  break;
3206  case AVMEDIA_TYPE_VIDEO:
3207  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3208  sar =
3210  (AVRational){ par_dst->height, par_dst->width });
3211  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3212  "with stream copy may produce invalid files\n");
3213  }
3214  else if (ist->st->sample_aspect_ratio.num)
3215  sar = ist->st->sample_aspect_ratio;
3216  else
3217  sar = par_src->sample_aspect_ratio;
3218  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3219  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3220  ost->st->r_frame_rate = ist->st->r_frame_rate;
3221  break;
3222  }
3223 
3224  ost->mux_timebase = ist->st->time_base;
3225 
3226  return 0;
3227 }
3228 
/*
 * Set the stream's "encoder" metadata tag to the libavcodec ident plus
 * encoder name, unless already present, honoring bitexact flags (plain
 * "Lavc <name>" is used when bitexact output is requested).
 *
 * NOTE(review): doxygen-extracted view — the signature (orig. 3229),
 * the codec-flags evaluation (orig. 3253) and the av_dict_set flags
 * argument (orig. 3267) are missing; restore from the real ffmpeg.c
 * before modifying logic.
 */
3230 {
3231  AVDictionaryEntry *e;
3232 
3233  uint8_t *encoder_string;
3234  int encoder_string_len;
3235  int format_flags = 0;
3236  int codec_flags = ost->enc_ctx->flags;
3237 
3238  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3239  return;
3240 
3241  e = av_dict_get(of->opts, "fflags", NULL, 0);
3242  if (e) {
3243  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3244  if (!o)
3245  return;
3246  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3247  }
3248  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3249  if (e) {
3250  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3251  if (!o)
3252  return;
3254  }
3255 
3256  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3257  encoder_string = av_mallocz(encoder_string_len);
3258  if (!encoder_string)
3259  exit_program(1);
3260 
3261  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3262  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3263  else
3264  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3265  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3266  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3268 }
3269 
/*
 * Parse the -force_key_frames specification (comma-separated times,
 * and "chapters[+offset]" entries which expand to chapter start times)
 * into a sorted array of pts values in the encoder time base, stored
 * on the output stream.
 *
 * NOTE(review): doxygen-extracted view — the signature head (orig.
 * 3270), the AVFormatContext declaration in the chapters branch (orig.
 * 3296), an av_log head (orig. 3302) and the count assignment (orig.
 * 3329) are missing; restore from the real ffmpeg.c before modifying
 * logic.
 */
3271  AVCodecContext *avctx)
3272 {
3273  char *p;
3274  int n = 1, i, size, index = 0;
3275  int64_t t, *pts;
3276 
3277  for (p = kf; *p; p++)
3278  if (*p == ',')
3279  n++;
3280  size = n;
3281  pts = av_malloc_array(size, sizeof(*pts));
3282  if (!pts) {
3283  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3284  exit_program(1);
3285  }
3286 
3287  p = kf;
3288  for (i = 0; i < n; i++) {
3289  char *next = strchr(p, ',');
3290 
3291  if (next)
3292  *next++ = 0;
3293 
3294  if (!memcmp(p, "chapters", 8)) {
3295 
3297  int j;
3298 
3299  if (avf->nb_chapters > INT_MAX - size ||
3300  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3301  sizeof(*pts)))) {
3303  "Could not allocate forced key frames array.\n");
3304  exit_program(1);
3305  }
3306  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3307  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3308 
3309  for (j = 0; j < avf->nb_chapters; j++) {
3310  AVChapter *c = avf->chapters[j];
3311  av_assert1(index < size);
3312  pts[index++] = av_rescale_q(c->start, c->time_base,
3313  avctx->time_base) + t;
3314  }
3315 
3316  } else {
3317 
3318  t = parse_time_or_die("force_key_frames", p, 1);
3319  av_assert1(index < size);
3320  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3321 
3322  }
3323 
3324  p = next;
3325  }
3326 
3327  av_assert0(index == size);
3328  qsort(pts, size, sizeof(*pts), compare_int64);
3330  ost->forced_kf_pts = pts;
3331 }
3332 
/*
 * Choose the encoder time base: an explicit -enc_time_base value wins;
 * a negative value means "copy from the input stream" (falling back to
 * default_time_base with a warning when no input stream is available).
 *
 * NOTE(review): doxygen-extracted view — the InputStream declaration
 * (orig. 3335, presumably via get_input_stream(ost)) is missing;
 * restore from the real ffmpeg.c before modifying logic.
 */
3333 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3334 {
3336  AVCodecContext *enc_ctx = ost->enc_ctx;
3337  AVFormatContext *oc;
3338 
3339  if (ost->enc_timebase.num > 0) {
3340  enc_ctx->time_base = ost->enc_timebase;
3341  return;
3342  }
3343 
3344  if (ost->enc_timebase.num < 0) {
3345  if (ist) {
3346  enc_ctx->time_base = ist->st->time_base;
3347  return;
3348  }
3349 
3350  oc = output_files[ost->file_index]->ctx;
3351  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3352  }
3353 
3354  enc_ctx->time_base = default_time_base;
3355 }
3356 
3358 {
3360  AVCodecContext *enc_ctx = ost->enc_ctx;
3363  int j, ret;
3364 
3366 
3367  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3368  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3369  // which have to be filtered out to prevent leaking them to output files.
3370  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3371 
3372  if (ist) {
3373  ost->st->disposition = ist->st->disposition;
3374 
3375  dec_ctx = ist->dec_ctx;
3376 
3378  } else {
3379  for (j = 0; j < oc->nb_streams; j++) {
3380  AVStream *st = oc->streams[j];
3381  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3382  break;
3383  }
3384  if (j == oc->nb_streams)
3388  }
3389 
3390  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3391  if (!ost->frame_rate.num)
3393  if (ist && !ost->frame_rate.num)
3394  ost->frame_rate = ist->framerate;
3395  if (ist && !ost->frame_rate.num)
3396  ost->frame_rate = ist->st->r_frame_rate;
3397  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3398  ost->frame_rate = (AVRational){25, 1};
3400  "No information "
3401  "about the input framerate is available. Falling "
3402  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3403  "if you want a different framerate.\n",
3404  ost->file_index, ost->index);
3405  }
3406 
3407  if (ost->max_frame_rate.num &&
3409  !ost->frame_rate.den))
3411 
3412  if (ost->enc->supported_framerates && !ost->force_fps) {
3413  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3414  ost->frame_rate = ost->enc->supported_framerates[idx];
3415  }
3416  // reduce frame rate for mpeg4 to be within the spec limits
3417  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3419  ost->frame_rate.num, ost->frame_rate.den, 65535);
3420  }
3421  }
3422 
3423  switch (enc_ctx->codec_type) {
3424  case AVMEDIA_TYPE_AUDIO:
3426  if (dec_ctx)
3428  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3432 
3434  break;
3435 
3436  case AVMEDIA_TYPE_VIDEO:
3438 
3439  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3441  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3443  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3444  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3445  }
3446 
3447  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3448  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3450  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3451  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3453 
3455  if (dec_ctx)
3457  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3458 
3459  if (frame) {
3460  enc_ctx->color_range = frame->color_range;
3461  enc_ctx->color_primaries = frame->color_primaries;
3462  enc_ctx->color_trc = frame->color_trc;
3463  enc_ctx->colorspace = frame->colorspace;
3464  enc_ctx->chroma_sample_location = frame->chroma_location;
3465  }
3466 
3467  enc_ctx->framerate = ost->frame_rate;
3468 
3470 
3471  if (!dec_ctx ||
3472  enc_ctx->width != dec_ctx->width ||
3473  enc_ctx->height != dec_ctx->height ||
3474  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3476  }
3477 
3478  if (ost->top_field_first == 0) {
3479  enc_ctx->field_order = AV_FIELD_BB;
3480  } else if (ost->top_field_first == 1) {
3481  enc_ctx->field_order = AV_FIELD_TT;
3482  }
3483 
3484  if (frame) {
3486  ost->top_field_first >= 0)
3487  frame->top_field_first = !!ost->top_field_first;
3488 
3489  if (frame->interlaced_frame) {
3490  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3491  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3492  else
3493  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3494  } else
3495  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3496  }
3497 
3498  if (ost->forced_keyframes) {
3499  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3502  if (ret < 0) {
3504  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3505  return ret;
3506  }
3511 
3512  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3513  // parse it only for static kf timings
3514  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3516  }
3517  }
3518  break;
3519  case AVMEDIA_TYPE_SUBTITLE:
3520  enc_ctx->time_base = AV_TIME_BASE_Q;
3521  if (!enc_ctx->width) {
3524  }
3525  break;
3526  case AVMEDIA_TYPE_DATA:
3527  break;
3528  default:
3529  abort();
3530  break;
3531  }
3532 
3533  ost->mux_timebase = enc_ctx->time_base;
3534 
3535  return 0;
3536 }
3537 
/*
 * NOTE(review): this region is a Doxygen text extraction with dropped lines --
 * every original line that contained a hyperlink is missing (e.g. 3538, 3548,
 * 3570, 3574, 3583). The code below is therefore incomplete as written here.
 * Presumably this is the tail of
 *     static int init_output_stream(OutputStream *ost, AVFrame *frame,
 *                                   char *error, int error_len)
 * which fully initializes one output stream: opens the encoder (or applies
 * streamcopy parameters), copies global side data, parses -disposition and
 * sets up bitstream filters. Returns 0 on success, a negative AVERROR code
 * on failure (with a human-readable message written into 'error').
 * TODO: confirm against the ffmpeg.c revision this was extracted from.
 */
3539  char *error, int error_len)
3540 {
3541  int ret = 0;
3542 
 /* Full transcode path: configure and open the encoder context. */
3543  if (ost->encoding_needed) {
3544  const AVCodec *codec = ost->enc;
3545  AVCodecContext *dec = NULL;
3546  InputStream *ist;
3547 
 /* (missing line 3548: presumably ret = init_output_stream_encode(ost, frame); -- confirm) */
3549  if (ret < 0)
3550  return ret;
3551 
 /* If this output maps to an input stream, reuse its decoder context for
  * the subtitle-header copy and subtitle-property checks below. */
3552  if ((ist = get_input_stream(ost)))
3553  dec = ist->dec_ctx;
3554  if (dec && dec->subtitle_header) {
3555  /* ASS code assumes this buffer is null terminated so add extra byte. */
3556  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3557  if (!ost->enc_ctx->subtitle_header)
3558  return AVERROR(ENOMEM);
3559  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3560  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3561  }
 /* Default to automatic thread count unless the user set one. */
3562  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3563  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* Audio encoders without built-in defaults get 128000 bit/s unless the
  * user supplied a bitrate via -b or -ab. */
3564  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3565  !codec->defaults &&
3566  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3567  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3568  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3569 
 /* (missing line 3570: presumably the hw-device setup call whose result is
  *  checked here -- confirm) */
3571  if (ret < 0) {
3572  snprintf(error, error_len, "Device setup failed for "
3573  "encoder on output stream #%d:%d : %s",
 /* (missing line 3574: the snprintf format arguments -- confirm) */
3575  return ret;
3576  }
3577 
 /* Subtitle transcoding is only supported text->text or bitmap->bitmap;
  * reject mixed conversions up front with a clear error message. */
3578  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3579  int input_props = 0, output_props = 0;
3580  AVCodecDescriptor const *input_descriptor =
3581  avcodec_descriptor_get(dec->codec_id);
3582  AVCodecDescriptor const *output_descriptor =
 /* (missing line 3583: presumably avcodec_descriptor_get() on the encoder's
  *  codec id -- confirm) */
3584  if (input_descriptor)
3585  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3586  if (output_descriptor)
3587  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3588  if (input_props && output_props && input_props != output_props) {
3589  snprintf(error, error_len,
3590  "Subtitle encoding currently only possible from text to text "
3591  "or bitmap to bitmap");
3592  return AVERROR_INVALIDDATA;
3593  }
3594  }
3595 
 /* Actually open the encoder; consumes ost->encoder_opts. */
3596  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3597  if (ret == AVERROR_EXPERIMENTAL)
3598  abort_codec_experimental(codec, 1);
3599  snprintf(error, error_len,
3600  "Error while opening encoder for output stream #%d:%d - "
3601  "maybe incorrect parameters such as bit_rate, rate, width or height",
3602  ost->file_index, ost->index);
3603  return ret;
3604  }
3605  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3606  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
 /* (missing line 3607: presumably the buffersink frame-size setup using
  *  the value below -- confirm) */
3608  ost->enc_ctx->frame_size);
 /* (missing line 3609) */
3610  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3611  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3612  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3613  " It takes bits/s as argument, not kbits/s\n");
3614 
 /* (missing line 3615: presumably copies encoder parameters to the stream;
  *  its result is checked below -- confirm) */
3616  if (ret < 0) {
3618  "Error initializing the output stream codec context.\n");
3619  exit_program(1);
3620  }
3621 
 /* Propagate encoder-generated global side data to the output stream. */
3622  if (ost->enc_ctx->nb_coded_side_data) {
3623  int i;
3624 
3625  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3626  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3627  uint8_t *dst_data;
3628 
3629  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3630  if (!dst_data)
3631  return AVERROR(ENOMEM);
3632  memcpy(dst_data, sd_src->data, sd_src->size);
3633  }
3634  }
3635 
3636  /*
3637  * Add global input side data. For now this is naive, and copies it
3638  * from the input stream's global side data. All side data should
3639  * really be funneled over AVFrame and libavfilter, then added back to
3640  * packet side data, and then potentially using the first packet for
3641  * global side data.
3642  */
3643  if (ist) {
3644  int i;
3645  for (i = 0; i < ist->st->nb_side_data; i++) {
3646  AVPacketSideData *sd = &ist->st->side_data[i];
3647  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3648  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3649  if (!dst)
3650  return AVERROR(ENOMEM);
3651  memcpy(dst, sd->data, sd->size);
 /* Rotation was already applied by autorotate filters; neutralize the
  * display matrix so downstream players do not rotate a second time. */
3652  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3653  av_display_rotation_set((uint32_t *)dst, 0);
3654  }
3655  }
3656  }
3657 
3658  // copy timebase while removing common factors
3659  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* (missing line 3660: the timebase assignment guarded above -- confirm) */
3661 
3662  // copy estimated duration as a hint to the muxer
3663  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3664  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
 /* Streamcopy path: no encoder; copy parameters from the input stream. */
3665  } else if (ost->stream_copy) {
 /* (missing line 3666: presumably ret = init_output_stream_streamcopy(ost); -- confirm) */
3667  if (ret < 0)
3668  return ret;
3669  }
3670 
3671  // parse user provided disposition, and update stream values
3672  if (ost->disposition) {
3673  static const AVOption opts[] = {
3674  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3675  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3676  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3677  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3678  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3679  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3680  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3681  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3682  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3683  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3684  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3685  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3686  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3687  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3688  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3689  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3690  { NULL },
3691  };
 /* Minimal throwaway AVClass so av_opt_eval_flags() can evaluate the
  * user's disposition string against the table above. */
3692  static const AVClass class = {
3693  .class_name = "",
3694  .item_name = av_default_item_name,
3695  .option = opts,
3696  .version = LIBAVUTIL_VERSION_INT,
3697  };
3698  const AVClass *pclass = &class;
3699 
3700  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3701  if (ret < 0)
3702  return ret;
3703  }
3704 
3705  /* initialize bitstream filters for the output stream
3706  * needs to be done here, because the codec id for streamcopy is not
3707  * known until now */
 /* (missing line 3708: the bsf init call whose result is checked below -- confirm) */
3709  if (ret < 0)
3710  return ret;
3711 
3712  ost->initialized = 1;
3713 
 /* (missing line 3714: presumably checks whether the output file can now
  *  write its header -- confirm) */
3715  if (ret < 0)
3716  return ret;
3717 
3718  return ret;
3719 }
3720 
/*
 * Warn (once per stream index) when a packet arrives for an input stream
 * that appeared after demuxing started. nb_streams_warn tracks the highest
 * stream index already reported so each new stream is logged only once.
 *
 * NOTE(review): extraction dropped line 3730 -- presumably the argument
 * supplying the "%s" media-type string in the format below (e.g.
 * av_get_media_type_string(st->codecpar->codec_type)) -- confirm.
 */
3721 static void report_new_stream(int input_index, AVPacket *pkt)
3722 {
3723  InputFile *file = input_files[input_index];
3724  AVStream *st = file->ctx->streams[pkt->stream_index];
3725 
3726  if (pkt->stream_index < file->nb_streams_warn)
3727  return;
3728  av_log(file->ctx, AV_LOG_WARNING,
3729  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3731  input_index, pkt->stream_index,
3732  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3733  file->nb_streams_warn = pkt->stream_index + 1;
3734 }
3735 
/*
 * One-time setup before the main transcode loop: binds complex-filtergraph
 * outputs to source streams, initializes input streams, eagerly initializes
 * streamcopy/subtitle/data output streams, discards unused programs, writes
 * headers for stream-less outputs, and finally dumps the stream mapping.
 * Returns 0 on success, a negative AVERROR code otherwise.
 *
 * NOTE(review): this is a Doxygen text extraction with dropped lines (3761,
 * 3772, 3789, 3792, 3799, 3817, 3847, 3851, 3863-3864, 3867, 3869, 3912);
 * the body below is incomplete as written -- confirm against ffmpeg.c.
 */
3736 static int transcode_init(void)
3737 {
3738  int ret = 0, i, j, k;
3739  AVFormatContext *oc;
3740  OutputStream *ost;
3741  InputStream *ist;
3742  char error[1024] = {0};
3743 
 /* For simple (single-input) filtergraphs, record which input stream feeds
  * each output so the mapping dump below can show a source index. */
3744  for (i = 0; i < nb_filtergraphs; i++) {
3745  FilterGraph *fg = filtergraphs[i];
3746  for (j = 0; j < fg->nb_outputs; j++) {
3747  OutputFilter *ofilter = fg->outputs[j];
3748  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3749  continue;
3750  if (fg->nb_inputs != 1)
3751  continue;
3752  for (k = nb_input_streams-1; k >= 0 ; k--)
3753  if (fg->inputs[0]->ist == input_streams[k])
3754  break;
3755  ofilter->ost->source_index = k;
3756  }
3757  }
3758 
3759  /* init framerate emulation */
3760  for (i = 0; i < nb_input_files; i++) {
 /* (missing line 3761: presumably InputFile *ifile = input_files[i]; -- confirm) */
3762  if (ifile->rate_emu)
3763  for (j = 0; j < ifile->nb_streams; j++)
3764  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3765  }
3766 
3767  /* init input streams */
3768  for (i = 0; i < nb_input_streams; i++)
3769  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* On failure, tear down all output streams before jumping to the dump. */
3770  for (i = 0; i < nb_output_streams; i++) {
3771  ost = output_streams[i];
 /* (missing line 3772: the per-stream cleanup call -- confirm) */
3773  }
3774  goto dump_format;
3775  }
3776 
3777  /*
3778  * initialize stream copy and subtitle/data streams.
3779  * Encoded AVFrame based streams will get initialized as follows:
3780  * - when the first AVFrame is received in do_video_out
3781  * - just before the first AVFrame is received in either transcode_step
3782  * or reap_filters due to us requiring the filter chain buffer sink
3783  * to be configured with the correct audio frame size, which is only
3784  * known after the encoder is initialized.
3785  */
3786  for (i = 0; i < nb_output_streams; i++) {
3787  if (!output_streams[i]->stream_copy &&
3788  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
 /* (missing line 3789: presumably the AVMEDIA_TYPE_AUDIO half of the
  *  condition -- confirm) */
3790  continue;
3791 
 /* (missing line 3792: presumably ret = init_output_stream_wrapper(...); -- confirm) */
3793  if (ret < 0)
3794  goto dump_format;
3795  }
3796 
3797  /* discard unused programs */
3798  for (i = 0; i < nb_input_files; i++) {
 /* (missing line 3799: presumably InputFile *ifile = input_files[i]; -- confirm) */
3800  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3801  AVProgram *p = ifile->ctx->programs[j];
3802  int discard = AVDISCARD_ALL;
3803 
 /* Keep the program if any of its member streams is still wanted. */
3804  for (k = 0; k < p->nb_stream_indexes; k++)
3805  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3806  discard = AVDISCARD_DEFAULT;
3807  break;
3808  }
3809  p->discard = discard;
3810  }
3811  }
3812 
3813  /* write headers for files with no streams */
3814  for (i = 0; i < nb_output_files; i++) {
3815  oc = output_files[i]->ctx;
3816  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
 /* (missing line 3817: the header-writing call whose result is checked
  *  below -- confirm) */
3818  if (ret < 0)
3819  goto dump_format;
3820  }
3821  }
3822 
3823  dump_format:
3824  /* dump the stream mapping */
3825  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
 /* First: inputs feeding complex filtergraphs. */
3826  for (i = 0; i < nb_input_streams; i++) {
3827  ist = input_streams[i];
3828 
3829  for (j = 0; j < ist->nb_filters; j++) {
3830  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3831  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3832  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3833  ist->filters[j]->name);
3834  if (nb_filtergraphs > 1)
3835  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3836  av_log(NULL, AV_LOG_INFO, "\n");
3837  }
3838  }
3839  }
3840 
 /* Second: one mapping line per output stream. */
3841  for (i = 0; i < nb_output_streams; i++) {
3842  ost = output_streams[i];
3843 
3844  if (ost->attachment_filename) {
3845  /* an attached file */
3846  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
 /* (missing line 3847: the filename/index format arguments -- confirm) */
3848  continue;
3849  }
3850 
 /* (missing line 3851: presumably the "complex filtergraph output" guard
  *  that opens this branch -- confirm) */
3852  /* output from a complex graph */
3853  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3854  if (nb_filtergraphs > 1)
3855  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3856 
3857  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3858  ost->index, ost->enc ? ost->enc->name : "?");
3859  continue;
3860  }
3861 
3862  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
 /* (missing lines 3863-3864: the source file/stream index arguments -- confirm) */
3865  ost->file_index,
3866  ost->index);
 /* (missing line 3867: presumably the guard/argument line for the sync
  *  annotation below -- confirm) */
3868  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
 /* (missing line 3869: the sync-stream file index argument -- confirm) */
3870  ost->sync_ist->st->index);
3871  if (ost->stream_copy)
3872  av_log(NULL, AV_LOG_INFO, " (copy)");
3873  else {
3874  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3875  const AVCodec *out_codec = ost->enc;
3876  const char *decoder_name = "?";
3877  const char *in_codec_name = "?";
3878  const char *encoder_name = "?";
3879  const char *out_codec_name = "?";
3880  const AVCodecDescriptor *desc;
3881 
 /* Show "native" when the implementation name equals the codec name. */
3882  if (in_codec) {
3883  decoder_name = in_codec->name;
3884  desc = avcodec_descriptor_get(in_codec->id);
3885  if (desc)
3886  in_codec_name = desc->name;
3887  if (!strcmp(decoder_name, in_codec_name))
3888  decoder_name = "native";
3889  }
3890 
3891  if (out_codec) {
3892  encoder_name = out_codec->name;
3893  desc = avcodec_descriptor_get(out_codec->id);
3894  if (desc)
3895  out_codec_name = desc->name;
3896  if (!strcmp(encoder_name, out_codec_name))
3897  encoder_name = "native";
3898  }
3899 
3900  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3901  in_codec_name, decoder_name,
3902  out_codec_name, encoder_name);
3903  }
3904  av_log(NULL, AV_LOG_INFO, "\n");
3905  }
3906 
3907  if (ret) {
3908  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3909  return ret;
3910  }
3911 
 /* (missing line 3912 -- confirm; presumably a state flag set on success) */
3913 
3914  return 0;
3915 }
3916 
3917 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
 /*
  * NOTE(review): extraction dropped lines 3923-3925 (presumably the
  * declarations of ost = output_streams[i], its OutputFile 'of' and the
  * AVFormatContext 'os' used below) and line 3933 (presumably the call that
  * finishes the sibling streams of a stream that hit -frames) -- confirm.
  */
3918 static int need_output(void)
3919 {
3920  int i;
3921 
3922  for (i = 0; i < nb_output_streams; i++) {
3926 
 /* A stream needs no more output once finished or once its file hit the
  * -fs size limit. */
3927  if (ost->finished ||
3928  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3929  continue;
 /* -frames reached: stop every stream of this output file. */
3930  if (ost->frame_number >= ost->max_frames) {
3931  int j;
3932  for (j = 0; j < of->ctx->nb_streams; j++)
3934  continue;
3935  }
3936 
3937  return 1;
3938  }
3939 
3940  return 0;
3941 }
3942 
3943 /**
3944  * Select the output stream to process.
3945  *
3946  * @return selected output stream, or NULL if none available
3947  */
 /*
  * NOTE(review): extraction dropped the signature line 3948 (presumably
  * "static OutputStream *choose_output(void)") plus interior lines 3955,
  * 3957, 3959-3960 and 3962 (the ost declaration, the DTS rescale into
  * AV_TIME_BASE_Q, and the cur_dts validity warning's guard/arguments) --
  * confirm against ffmpeg.c. The selection policy visible below: prefer any
  * not-yet-initialized stream, otherwise pick the unfinished stream with the
  * smallest last-muxed DTS; NULL is returned when the candidate is currently
  * unavailable.
  */
3949 {
3950  int i;
3951  int64_t opts_min = INT64_MAX;
3952  OutputStream *ost_min = NULL;
3953 
3954  for (i = 0; i < nb_output_streams; i++) {
 /* Streams that never muxed a packet sort first (INT64_MIN). */
3956  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3958  AV_TIME_BASE_Q);
3961  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3963 
 /* Uninitialized streams must be fed first so their encoders get set up. */
3964  if (!ost->initialized && !ost->inputs_done)
3965  return ost->unavailable ? NULL : ost;
3966 
3967  if (!ost->finished && opts < opts_min) {
3968  opts_min = opts;
3969  ost_min = ost->unavailable ? NULL : ost;
3970  }
3971  }
3972  return ost_min;
3973 }
3974 
/**
 * Enable or disable terminal echo on stdin (fd 0).
 *
 * Best-effort: when termios is unavailable at build time, or tcgetattr()
 * fails (e.g. stdin is not a terminal), nothing happens.
 *
 * @param on non-zero to enable echo, zero to disable it
 */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_attrs;

    if (tcgetattr(0, &term_attrs) != 0)
        return;

    if (on)
        term_attrs.c_lflag |= ECHO;
    else
        term_attrs.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term_attrs);
#endif
}
3986 
3987 static int check_keyboard_interaction(int64_t cur_time)
3988 {
3989  int i, ret, key;
3990  static int64_t last_time;
3991  if (received_nb_signals)
3992  return AVERROR_EXIT;
3993  /* read_key() returns 0 on EOF */
3994  if(cur_time - last_time >= 100000 && !run_as_daemon){
3995  key = read_key();
3996  last_time = cur_time;
3997  }else
3998  key = -1;
3999  if (key == 'q')
4000  return AVERROR_EXIT;
4001  if (key == '+') av_log_set_level(av_log_get_level()+10);
4002  if (key == '-') av_log_set_level(av_log_get_level()-10);
4003  if (key == 's') qp_hist ^= 1;
4004  if (key == 'h'){
4005  if (do_hex_dump){
4006  do_hex_dump = do_pkt_dump = 0;
4007  } else if(do_pkt_dump){
4008  do_hex_dump = 1;
4009  } else
4010  do_pkt_dump = 1;
4012  }
4013  if (key == 'c' || key == 'C'){
4014  char buf[4096], target[64], command[256], arg[256] = {0};
4015  double time;
4016  int k, n = 0;
4017  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4018  i = 0;
4019  set_tty_echo(1);
4020  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4021  if (k > 0)
4022  buf[i++] = k;
4023  buf[i] = 0;
4024  set_tty_echo(0);
4025  fprintf(stderr, "\n");
4026  if (k > 0 &&
4027  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4028  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4029  target, time, command, arg);
4030  for (i = 0; i < nb_filtergraphs; i++) {
4031  FilterGraph *fg = filtergraphs[i];
4032  if (fg->graph) {
4033  if (time < 0) {
4034  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4035  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4036  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4037  } else if (key == 'c') {
4038  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4040  } else {
4041  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4042  if (ret < 0)
4043  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4044  }
4045  }
4046  }
4047  } else {
4049  "Parse error, at least 3 arguments were expected, "
4050  "only %d given in string '%s'\n", n, buf);
4051  }
4052  }
4053  if (key == 'd' || key == 'D'){
4054  int debug=0;
4055  if(key == 'D') {
4056  debug = input_streams[0]->dec_ctx->debug << 1;
4057  if(!debug) debug = 1;
4058  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4059  debug += debug;
4060  }else{
4061  char buf[32];
4062  int k = 0;
4063  i = 0;
4064  set_tty_echo(1);
4065  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4066  if (k > 0)
4067  buf[i++] = k;
4068  buf[i] = 0;
4069  set_tty_echo(0);
4070  fprintf(stderr, "\n");
4071  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4072  fprintf(stderr,"error parsing debug value\n");
4073  }
4074  for(i=0;i<