ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
127 } BenchmarkTimeStamps;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
146 static uint8_t *subtitle_out;
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
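/* Illustrative usage (not prescriptive): a command such as
       ffmpeg -i input.mkv -filter_complex "[0:v][0:s]overlay" output.mkv
   routes a bitmap subtitle stream through this path; it is rendered into
   AV_PIX_FMT_RGB32 frames below and blended onto the video by the overlay
   filter. */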
176 
177 static int sub2video_get_blank_frame(InputStream *ist)
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
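 /* bitmap subtitle rectangles are palettized: data[0] holds 8-bit palette
    indices, data[1] the 32-bit palette entries; the loop below expands each
    index into a full RGB32 pixel */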
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, use the current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, use the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Impossible to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlayed subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall for as long as needed
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
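 /* switch the terminal to a raw-like mode: no echo, no canonical line
    buffering, 8-bit characters, and blocking single-byte reads
    (VMIN=1, VTIME=0), so that read_key() can pick up single keypresses */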
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end up here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
537  av_fifo_freep(&ifilter->frame_queue);
538  av_freep(&ifilter->displaymatrix);
539  if (ist->sub2video.sub_queue) {
540  while (av_fifo_size(ist->sub2video.sub_queue)) {
541  AVSubtitle sub;
542  av_fifo_generic_read(ist->sub2video.sub_queue,
543  &sub, sizeof(sub), NULL);
545  }
546  av_fifo_freep(&ist->sub2video.sub_queue);
547  }
548  av_buffer_unref(&ifilter->hw_frames_ctx);
549  av_freep(&ifilter->name);
550  av_freep(&fg->inputs[j]);
551  }
552  av_freep(&fg->inputs);
553  for (j = 0; j < fg->nb_outputs; j++) {
554  OutputFilter *ofilter = fg->outputs[j];
555 
556  avfilter_inout_free(&ofilter->out_tmp);
557  av_freep(&ofilter->name);
558  av_freep(&ofilter->formats);
559  av_freep(&ofilter->channel_layouts);
560  av_freep(&ofilter->sample_rates);
561  av_freep(&fg->outputs[j]);
562  }
563  av_freep(&fg->outputs);
564  av_freep(&fg->graph_desc);
565 
567  }
569 
571 
572  /* close files */
573  for (i = 0; i < nb_output_files; i++) {
574  OutputFile *of = output_files[i];
576  if (!of)
577  continue;
578  s = of->ctx;
579  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
580  avio_closep(&s->pb);
582  av_dict_free(&of->opts);
583 
585  }
586  for (i = 0; i < nb_output_streams; i++) {
588 
589  if (!ost)
590  continue;
591 
593 
598 
601  av_freep(&ost->avfilter);
603 
606 
609 
612 
613  if (ost->muxing_queue) {
614  while (av_fifo_size(ost->muxing_queue)) {
615  AVPacket *pkt;
618  }
620  }
621 
623  }
624 #if HAVE_THREADS
625  free_input_threads();
626 #endif
627  for (i = 0; i < nb_input_files; i++) {
631  }
632  for (i = 0; i < nb_input_streams; i++) {
634 
635  av_frame_free(&ist->decoded_frame);
636  av_frame_free(&ist->filter_frame);
637  av_packet_free(&ist->pkt);
638  av_dict_free(&ist->decoder_opts);
639  avsubtitle_free(&ist->prev_sub.subtitle);
640  av_frame_free(&ist->sub2video.frame);
641  av_freep(&ist->filters);
642  av_freep(&ist->hwaccel_device);
643  av_freep(&ist->dts_buffer);
644 
645  avcodec_free_context(&ist->dec_ctx);
646 
648  }
649 
650  if (vstats_file) {
651  if (fclose(vstats_file))
653  "Error closing vstats file, loss of information possible: %s\n",
654  av_err2str(AVERROR(errno)));
655  }
658 
663 
664  uninit_opts();
665 
667 
668  if (received_sigterm) {
669  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
670  (int) received_sigterm);
671  } else if (ret && atomic_load(&transcode_init_done)) {
672  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
673  }
674  term_exit();
675  ffmpeg_exited = 1;
676 }
677 
679 {
680  AVDictionaryEntry *t = NULL;
681 
682  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
684  }
685 }
686 
688 {
690  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
691  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
692  exit_program(1);
693  }
694 }
695 
696 static void abort_codec_experimental(const AVCodec *c, int encoder)
697 {
698  exit_program(1);
699 }
700 
701 static void update_benchmark(const char *fmt, ...)
702 {
703  if (do_benchmark_all) {
705  va_list va;
706  char buf[1024];
707 
708  if (fmt) {
709  va_start(va, fmt);
710  vsnprintf(buf, sizeof(buf), fmt, va);
711  va_end(va);
713  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
716  t.real_usec - current_time.real_usec, buf);
717  }
718  current_time = t;
719  }
720 }
721 
723 {
724  int i;
725  for (i = 0; i < nb_output_streams; i++) {
726  OutputStream *ost2 = output_streams[i];
727  ost2->finished |= ost == ost2 ? this_stream : others;
728  }
729 }
730 
731 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
732 {
733  AVFormatContext *s = of->ctx;
734  AVStream *st = ost->st;
735  int ret;
736 
737  /*
738  * Audio encoders may split the packets -- #frames in != #packets out.
739  * But there is no reordering, so we can limit the number of output packets
740  * by simply dropping them here.
741  * Counting encoded video frames needs to be done separately because of
742  * reordering, see do_video_out().
743  * Do not count the packet when unqueued because it has been counted when queued.
744  */
745  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
746  if (ost->frame_number >= ost->max_frames) {
748  return;
749  }
750  ost->frame_number++;
751  }
752 
753  if (!of->header_written) {
754  AVPacket *tmp_pkt;
755  /* the muxer is not initialized yet, buffer the packet */
756  if (!av_fifo_space(ost->muxing_queue)) {
757  unsigned int are_we_over_size =
759  int new_size = are_we_over_size ?
763 
764  if (new_size <= av_fifo_size(ost->muxing_queue)) {
766  "Too many packets buffered for output stream %d:%d.\n",
767  ost->file_index, ost->st->index);
768  exit_program(1);
769  }
770  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
771  if (ret < 0)
772  exit_program(1);
773  }
775  if (ret < 0)
776  exit_program(1);
777  tmp_pkt = av_packet_alloc();
778  if (!tmp_pkt)
779  exit_program(1);
780  av_packet_move_ref(tmp_pkt, pkt);
781  ost->muxing_queue_data_size += tmp_pkt->size;
782  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
783  return;
784  }
785 
788  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
789 
791  int i;
793  NULL);
794  ost->quality = sd ? AV_RL32(sd) : -1;
795  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
796 
797  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
798  if (sd && i < sd[5])
799  ost->error[i] = AV_RL64(sd + 8 + 8*i);
800  else
801  ost->error[i] = -1;
802  }
803 
804  if (ost->frame_rate.num && ost->is_cfr) {
805  if (pkt->duration > 0)
806  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
808  ost->mux_timebase);
809  }
810  }
811 
813 
814  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
815  if (pkt->dts != AV_NOPTS_VALUE &&
816  pkt->pts != AV_NOPTS_VALUE &&
817  pkt->dts > pkt->pts) {
818  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
819  pkt->dts, pkt->pts,
820  ost->file_index, ost->st->index);
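 /* summing the three candidates and subtracting their minimum and maximum
    leaves the median of pkt->pts, pkt->dts and last_mux_dts + 1, which is
    used as the replacement timestamp */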
821  pkt->pts =
822  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
823  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
824  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
825  }
827  pkt->dts != AV_NOPTS_VALUE &&
830  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
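 /* unless the muxer sets AVFMT_TS_NONSTRICT, DTS must be strictly
    increasing, so the previous DTS plus one is the smallest value allowed */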
831  if (pkt->dts < max) {
832  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
833  if (exit_on_error)
834  loglevel = AV_LOG_ERROR;
835  av_log(s, loglevel, "Non-monotonous DTS in output stream "
836  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
838  if (exit_on_error) {
839  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
840  exit_program(1);
841  }
842  av_log(s, loglevel, "changing to %"PRId64". This may result "
843  "in incorrect timestamps in the output file.\n",
844  max);
845  if (pkt->pts >= pkt->dts)
846  pkt->pts = FFMAX(pkt->pts, max);
847  pkt->dts = max;
848  }
849  }
850  }
851  ost->last_mux_dts = pkt->dts;
852 
853  ost->data_size += pkt->size;
854  ost->packets_written++;
855 
857 
858  if (debug_ts) {
859  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
860  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
864  pkt->size
865  );
866  }
867 
869  if (ret < 0) {
870  print_error("av_interleaved_write_frame()", ret);
871  main_return_code = 1;
873  }
875 }
876 
878 {
880 
882  if (of->shortest) {
884  of->recording_time = FFMIN(of->recording_time, end);
885  }
886 }
887 
888 /*
889  * Send a single packet to the output, applying any bitstream filters
890  * associated with the output stream. This may result in any number
891  * of packets actually being written, depending on what bitstream
892  * filters are applied. The supplied packet is consumed and will be
893  * blank (as if newly-allocated) when this function returns.
894  *
895  * If eof is set, instead indicate EOF to all bitstream filters and
896  * therefore flush any delayed packets to the output. A blank packet
897  * must be supplied in this case.
898  */
900  OutputStream *ost, int eof)
901 {
902  int ret = 0;
903 
904  /* apply the output bitstream filters */
905  if (ost->bsf_ctx) {
906  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
907  if (ret < 0)
908  goto finish;
909  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
910  write_packet(of, pkt, ost, 0);
911  if (ret == AVERROR(EAGAIN))
912  ret = 0;
913  } else if (!eof)
914  write_packet(of, pkt, ost, 0);
915 
916 finish:
917  if (ret < 0 && ret != AVERROR_EOF) {
918  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
919  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
920  if(exit_on_error)
921  exit_program(1);
922  }
923 }
924 
926 {
928 
929  if (of->recording_time != INT64_MAX &&
931  AV_TIME_BASE_Q) >= 0) {
933  return 0;
934  }
935  return 1;
936 }
937 
939  AVFrame *frame)
940 {
941  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
942  AVCodecContext *enc = ost->enc_ctx;
943  if (!frame || frame->pts == AV_NOPTS_VALUE ||
944  !enc || !ost->filter || !ost->filter->graph->graph)
945  goto early_exit;
946 
947  {
949 
950  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
952  AVRational tb = enc->time_base;
953  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
954 
955  tb.den <<= extra_bits;
956  float_pts =
957  av_rescale_q(frame->pts, filter_tb, tb) -
959  float_pts /= 1 << extra_bits;
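 /* worked example: with an encoder time base of 1/25, extra_bits is
    av_clip(29 - av_log2(25), 0, 16) = 16, so the rescale above runs in a
    1/(25 << 16) time base and the division by 1 << 16 yields a fractional
    pts expressed in 1/25 units */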
960  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
961  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
962 
963  frame->pts =
964  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
966  }
967 
968 early_exit:
969 
970  if (debug_ts) {
971  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
972  frame ? av_ts2str(frame->pts) : "NULL",
973  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
974  float_pts,
975  enc ? enc->time_base.num : -1,
976  enc ? enc->time_base.den : -1);
977  }
978 
979  return float_pts;
980 }
981 
983  char *error, int error_len);
984 
986  unsigned int fatal)
987 {
988  int ret = AVERROR_BUG;
989  char error[1024] = {0};
990 
991  if (ost->initialized)
992  return 0;
993 
994  ret = init_output_stream(ost, frame, error, sizeof(error));
995  if (ret < 0) {
996  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
998 
999  if (fatal)
1000  exit_program(1);
1001  }
1002 
1003  return ret;
1004 }
1005 
1007  AVFrame *frame)
1008 {
1009  AVCodecContext *enc = ost->enc_ctx;
1010  AVPacket *pkt = ost->pkt;
1011  int ret;
1012 
1014 
1015  if (!check_recording_time(ost))
1016  return;
1017 
1018  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1019  frame->pts = ost->sync_opts;
1020  ost->sync_opts = frame->pts + frame->nb_samples;
1021  ost->samples_encoded += frame->nb_samples;
1022  ost->frames_encoded++;
1023 
1025  if (debug_ts) {
1026  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1027  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1028  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1029  enc->time_base.num, enc->time_base.den);
1030  }
1031 
1032  ret = avcodec_send_frame(enc, frame);
1033  if (ret < 0)
1034  goto error;
1035 
1036  while (1) {
1038  ret = avcodec_receive_packet(enc, pkt);
1039  if (ret == AVERROR(EAGAIN))
1040  break;
1041  if (ret < 0)
1042  goto error;
1043 
1044  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1045 
1047 
1048  if (debug_ts) {
1049  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1050  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1053  }
1054 
1055  output_packet(of, pkt, ost, 0);
1056  }
1057 
1058  return;
1059 error:
1060  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1061  exit_program(1);
1062 }
1063 
1064 static void do_subtitle_out(OutputFile *of,
1065  OutputStream *ost,
1066  AVSubtitle *sub)
1067 {
1068  int subtitle_out_max_size = 1024 * 1024;
1069  int subtitle_out_size, nb, i;
1070  AVCodecContext *enc;
1071  AVPacket *pkt = ost->pkt;
1072  int64_t pts;
1073 
1074  if (sub->pts == AV_NOPTS_VALUE) {
1075  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1076  if (exit_on_error)
1077  exit_program(1);
1078  return;
1079  }
1080 
1081  enc = ost->enc_ctx;
1082 
1083  if (!subtitle_out) {
1084  subtitle_out = av_malloc(subtitle_out_max_size);
1085  if (!subtitle_out) {
1086  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1087  exit_program(1);
1088  }
1089  }
1090 
1091  /* Note: DVB subtitles need one packet to draw them and another
1092  packet to clear them */
1093  /* XXX: signal it in the codec context ? */
1094  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1095  nb = 2;
1096  else
1097  nb = 1;
1098 
1099  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1100  pts = sub->pts;
1103  for (i = 0; i < nb; i++) {
1104  unsigned save_num_rects = sub->num_rects;
1105 
1107  if (!check_recording_time(ost))
1108  return;
1109 
1110  sub->pts = pts;
1111  // start_display_time is required to be 0
1112  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1113  sub->end_display_time -= sub->start_display_time;
1114  sub->start_display_time = 0;
1115  if (i == 1)
1116  sub->num_rects = 0;
1117 
1118  ost->frames_encoded++;
1119 
1120  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1121  subtitle_out_max_size, sub);
1122  if (i == 1)
1123  sub->num_rects = save_num_rects;
1124  if (subtitle_out_size < 0) {
1125  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1126  exit_program(1);
1127  }
1128 
1130  pkt->data = subtitle_out;
1131  pkt->size = subtitle_out_size;
1133  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1134  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1135  /* XXX: the pts correction is handled here. Maybe handling
1136  it in the codec would be better */
1137  if (i == 0)
1138  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1139  else
1140  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1141  }
1142  pkt->dts = pkt->pts;
1143  output_packet(of, pkt, ost, 0);
1144  }
1145 }
1146 
1147 static void do_video_out(OutputFile *of,
1148  OutputStream *ost,
1149  AVFrame *next_picture)
1150 {
1151  int ret, format_video_sync;
1152  AVPacket *pkt = ost->pkt;
1153  AVCodecContext *enc = ost->enc_ctx;
1154  AVRational frame_rate;
1155  int nb_frames, nb0_frames, i;
1156  double delta, delta0;
1157  double duration = 0;
1158  double sync_ipts = AV_NOPTS_VALUE;
1159  int frame_size = 0;
1160  InputStream *ist = NULL;
1162 
1163  init_output_stream_wrapper(ost, next_picture, 1);
1164  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1165 
1166  if (ost->source_index >= 0)
1168 
1169  frame_rate = av_buffersink_get_frame_rate(filter);
1170  if (frame_rate.num > 0 && frame_rate.den > 0)
1171  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
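 /* e.g. a 25 fps stream encoded with a 1/12800 time base gives
    duration = 1 / (25 * 1/12800) = 512 encoder ticks per frame */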
1172 
1173  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1175 
1176  if (!ost->filters_script &&
1177  !ost->filters &&
1178  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1179  next_picture &&
1180  ist &&
1181  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1182  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1183  }
1184 
1185  if (!next_picture) {
1186  //end, flushing
1187  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1188  ost->last_nb0_frames[1],
1189  ost->last_nb0_frames[2]);
1190  } else {
1191  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1192  delta = delta0 + duration;
1193 
1194  /* by default, we output a single frame */
1195  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1196  nb_frames = 1;
1197 
1198  format_video_sync = video_sync_method;
1199  if (format_video_sync == VSYNC_AUTO) {
1200  if(!strcmp(of->ctx->oformat->name, "avi")) {
1201  format_video_sync = VSYNC_VFR;
1202  } else
1203  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1204  if ( ist
1205  && format_video_sync == VSYNC_CFR
1206  && input_files[ist->file_index]->ctx->nb_streams == 1
1207  && input_files[ist->file_index]->input_ts_offset == 0) {
1208  format_video_sync = VSYNC_VSCFR;
1209  }
1210  if (format_video_sync == VSYNC_CFR && copy_ts) {
1211  format_video_sync = VSYNC_VSCFR;
1212  }
1213  }
1214  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1215 
1216  if (delta0 < 0 &&
1217  delta > 0 &&
1218  format_video_sync != VSYNC_PASSTHROUGH &&
1219  format_video_sync != VSYNC_DROP) {
1220  if (delta0 < -0.6) {
1221  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1222  } else
1223  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1224  sync_ipts = ost->sync_opts;
1225  duration += delta0;
1226  delta0 = 0;
1227  }
1228 
1229  switch (format_video_sync) {
1230  case VSYNC_VSCFR:
1231  if (ost->frame_number == 0 && delta0 >= 0.5) {
1232  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1233  delta = duration;
1234  delta0 = 0;
1235  ost->sync_opts = llrint(sync_ipts);
1236  }
1237  case VSYNC_CFR:
1238  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1239  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1240  nb_frames = 0;
1241  } else if (delta < -1.1)
1242  nb_frames = 0;
1243  else if (delta > 1.1) {
1244  nb_frames = lrintf(delta);
1245  if (delta0 > 1.1)
1246  nb0_frames = llrintf(delta0 - 0.6);
1247  }
1248  break;
1249  case VSYNC_VFR:
1250  if (delta <= -0.6)
1251  nb_frames = 0;
1252  else if (delta > 0.6)
1253  ost->sync_opts = llrint(sync_ipts);
1254  break;
1255  case VSYNC_DROP:
1256  case VSYNC_PASSTHROUGH:
1257  ost->sync_opts = llrint(sync_ipts);
1258  break;
1259  default:
1260  av_assert0(0);
1261  }
1262  }
1263 
1264  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1265  nb0_frames = FFMIN(nb0_frames, nb_frames);
1266 
1267  memmove(ost->last_nb0_frames + 1,
1269  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1270  ost->last_nb0_frames[0] = nb0_frames;
1271 
1272  if (nb0_frames == 0 && ost->last_dropped) {
1273  nb_frames_drop++;
1275  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1277  }
1278  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1279  if (nb_frames > dts_error_threshold * 30) {
1280  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1281  nb_frames_drop++;
1282  return;
1283  }
1284  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1285  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1286  if (nb_frames_dup > dup_warning) {
1287  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1288  dup_warning *= 10;
1289  }
1290  }
1291  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1292  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1293 
1294  /* duplicates frame if needed */
1295  for (i = 0; i < nb_frames; i++) {
1296  AVFrame *in_picture;
1297  int forced_keyframe = 0;
1298  double pts_time;
1299 
1300  if (i < nb0_frames && ost->last_frame) {
1301  in_picture = ost->last_frame;
1302  } else
1303  in_picture = next_picture;
1304 
1305  if (!in_picture)
1306  return;
1307 
1308  in_picture->pts = ost->sync_opts;
1309 
1310  if (!check_recording_time(ost))
1311  return;
1312 
1313  in_picture->quality = enc->global_quality;
1314  in_picture->pict_type = 0;
1315 
1317  in_picture->pts != AV_NOPTS_VALUE)
1318  ost->forced_kf_ref_pts = in_picture->pts;
1319 
1320  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1321  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1323  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1324  ost->forced_kf_index++;
1325  forced_keyframe = 1;
1326  } else if (ost->forced_keyframes_pexpr) {
1327  double res;
1331  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1337  res);
1338  if (res) {
1339  forced_keyframe = 1;
1345  }
1346 
1348  } else if ( ost->forced_keyframes
1349  && !strncmp(ost->forced_keyframes, "source", 6)
1350  && in_picture->key_frame==1
1351  && !i) {
1352  forced_keyframe = 1;
1353  } else if ( ost->forced_keyframes
1354  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1355  && !i) {
1356  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1357  ost->dropped_keyframe = 0;
1358  }
1359 
1360  if (forced_keyframe) {
1361  in_picture->pict_type = AV_PICTURE_TYPE_I;
1362  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1363  }
1364 
1366  if (debug_ts) {
1367  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1368  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1369  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1370  enc->time_base.num, enc->time_base.den);
1371  }
1372 
1373  ost->frames_encoded++;
1374 
1375  ret = avcodec_send_frame(enc, in_picture);
1376  if (ret < 0)
1377  goto error;
1378  // Make sure Closed Captions will not be duplicated
1380 
1381  while (1) {
1383  ret = avcodec_receive_packet(enc, pkt);
1384  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1385  if (ret == AVERROR(EAGAIN))
1386  break;
1387  if (ret < 0)
1388  goto error;
1389 
1390  if (debug_ts) {
1391  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1392  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1395  }
1396 
1398  pkt->pts = ost->sync_opts;
1399 
1401 
1402  if (debug_ts) {
1403  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1404  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1407  }
1408 
1409  frame_size = pkt->size;
1410  output_packet(of, pkt, ost, 0);
1411 
1412  /* if two pass, output log */
1413  if (ost->logfile && enc->stats_out) {
1414  fprintf(ost->logfile, "%s", enc->stats_out);
1415  }
1416  }
1417  ost->sync_opts++;
1418  /*
1419  * For video, number of frames in == number of packets out.
1420  * But there may be reordering, so we can't throw away frames on encoder
1421  * flush, we need to limit them here, before they go into encoder.
1422  */
1423  ost->frame_number++;
1424 
1425  if (vstats_filename && frame_size)
1427  }
1428 
1429  if (!ost->last_frame)
1432  if (next_picture && ost->last_frame)
1433  av_frame_ref(ost->last_frame, next_picture);
1434  else
1436 
1437  return;
1438 error:
1439  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1440  exit_program(1);
1441 }
1442 
1443 static double psnr(double d)
1444 {
1445  return -10.0 * log10(d);
1446 }
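/* psnr() expects the mean squared error normalized by 255^2, as computed by
   the callers below; e.g. a normalized error of 1e-4 maps to
   -10 * log10(1e-4) = 40 dB */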
1447 
1449 {
1450  AVCodecContext *enc;
1451  int frame_number;
1452  double ti1, bitrate, avg_bitrate;
1453 
1454  /* this is executed just the first time do_video_stats is called */
1455  if (!vstats_file) {
1456  vstats_file = fopen(vstats_filename, "w");
1457  if (!vstats_file) {
1458  perror("fopen");
1459  exit_program(1);
1460  }
1461  }
1462 
1463  enc = ost->enc_ctx;
1464  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1465  frame_number = ost->st->nb_frames;
1466  if (vstats_version <= 1) {
1467  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1468  ost->quality / (float)FF_QP2LAMBDA);
1469  } else {
1470  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1471  ost->quality / (float)FF_QP2LAMBDA);
1472  }
1473 
1474  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1475  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1476 
1477  fprintf(vstats_file,"f_size= %6d ", frame_size);
1478  /* compute pts value */
1480  if (ti1 < 0.01)
1481  ti1 = 0.01;
1482 
1483  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1484  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
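 /* e.g. a 25000-byte frame with a 1/25 time base gives
    25000 * 8 / (1/25) / 1000 = 5000 kbit/s instantaneous bitrate; the
    average divides the total bytes written so far by the elapsed time ti1 */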
1485  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1486  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1487  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1488  }
1489 }
1490 
1492 {
1494  int i;
1495 
1497 
1498  if (of->shortest) {
1499  for (i = 0; i < of->ctx->nb_streams; i++)
1501  }
1502 }
1503 
1504 /**
1505  * Get and encode new output from any of the filtergraphs, without causing
1506  * activity.
1507  *
1508  * @return 0 for success, <0 for severe errors
1509  */
1510 static int reap_filters(int flush)
1511 {
1512  AVFrame *filtered_frame = NULL;
1513  int i;
1514 
1515  /* Reap all buffers present in the buffer sinks */
1516  for (i = 0; i < nb_output_streams; i++) {
1520  AVCodecContext *enc = ost->enc_ctx;
1521  int ret = 0;
1522 
1523  if (!ost->filter || !ost->filter->graph->graph)
1524  continue;
1525  filter = ost->filter->filter;
1526 
1527  /*
1528  * Unlike video, with audio the audio frame size matters.
1529  * Currently we are fully reliant on the lavfi filter chain to
1530  * do the buffering deed for us, and thus the frame size parameter
1531  * needs to be set accordingly. Where does one get the required
1532  * frame size? From the initialized AVCodecContext of an audio
1533  * encoder. Thus, if we have gotten to an audio stream, initialize
1534  * the encoder earlier than receiving the first AVFrame.
1535  */
1538 
1539  if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1540  return AVERROR(ENOMEM);
1541  }
1542  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1543  return AVERROR(ENOMEM);
1544  }
1545  filtered_frame = ost->filtered_frame;
1546 
1547  while (1) {
1548  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1550  if (ret < 0) {
1551  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1553  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1554  } else if (flush && ret == AVERROR_EOF) {
1556  do_video_out(of, ost, NULL);
1557  }
1558  break;
1559  }
1560  if (ost->finished) {
1561  av_frame_unref(filtered_frame);
1562  continue;
1563  }
1564 
1565  switch (av_buffersink_get_type(filter)) {
1566  case AVMEDIA_TYPE_VIDEO:
1567  if (!ost->frame_aspect_ratio.num)
1568  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1569 
1570  do_video_out(of, ost, filtered_frame);
1571  break;
1572  case AVMEDIA_TYPE_AUDIO:
1573  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1574  enc->channels != filtered_frame->channels) {
1576  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1577  break;
1578  }
1579  do_audio_out(of, ost, filtered_frame);
1580  break;
1581  default:
1582  // TODO support subtitle filters
1583  av_assert0(0);
1584  }
1585 
1586  av_frame_unref(filtered_frame);
1587  }
1588  }
1589 
1590  return 0;
1591 }
1592 
1593 static void print_final_stats(int64_t total_size)
1594 {
1595  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1596  uint64_t subtitle_size = 0;
1597  uint64_t data_size = 0;
1598  float percent = -1.0;
1599  int i, j;
1600  int pass1_used = 1;
1601 
1602  for (i = 0; i < nb_output_streams; i++) {
1604  switch (ost->enc_ctx->codec_type) {
1605  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1606  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1607  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1608  default: other_size += ost->data_size; break;
1609  }
1610  extra_size += ost->enc_ctx->extradata_size;
1611  data_size += ost->data_size;
1614  pass1_used = 0;
1615  }
1616 
1617  if (data_size && total_size>0 && total_size >= data_size)
1618  percent = 100.0 * (total_size - data_size) / data_size;
1619 
1620  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1621  video_size / 1024.0,
1622  audio_size / 1024.0,
1623  subtitle_size / 1024.0,
1624  other_size / 1024.0,
1625  extra_size / 1024.0);
1626  if (percent >= 0.0)
1627  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1628  else
1629  av_log(NULL, AV_LOG_INFO, "unknown");
1630  av_log(NULL, AV_LOG_INFO, "\n");
1631 
1632  /* print verbose per-stream stats */
1633  for (i = 0; i < nb_input_files; i++) {
1634  InputFile *f = input_files[i];
1635  uint64_t total_packets = 0, total_size = 0;
1636 
1637  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1638  i, f->ctx->url);
1639 
1640  for (j = 0; j < f->nb_streams; j++) {
1641  InputStream *ist = input_streams[f->ist_index + j];
1642  enum AVMediaType type = ist->dec_ctx->codec_type;
1643 
1644  total_size += ist->data_size;
1645  total_packets += ist->nb_packets;
1646 
1647  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1648  i, j, media_type_string(type));
1649  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1650  ist->nb_packets, ist->data_size);
1651 
1652  if (ist->decoding_needed) {
1653  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1654  ist->frames_decoded);
1655  if (type == AVMEDIA_TYPE_AUDIO)
1656  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1657  av_log(NULL, AV_LOG_VERBOSE, "; ");
1658  }
1659 
1660  av_log(NULL, AV_LOG_VERBOSE, "\n");
1661  }
1662 
1663  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1664  total_packets, total_size);
1665  }
1666 
1667  for (i = 0; i < nb_output_files; i++) {
1668  OutputFile *of = output_files[i];
1669  uint64_t total_packets = 0, total_size = 0;
1670 
1671  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1672  i, of->ctx->url);
1673 
1674  for (j = 0; j < of->ctx->nb_streams; j++) {
1677 
1678  total_size += ost->data_size;
1679  total_packets += ost->packets_written;
1680 
1681  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1682  i, j, media_type_string(type));
1683  if (ost->encoding_needed) {
1684  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1685  ost->frames_encoded);
1686  if (type == AVMEDIA_TYPE_AUDIO)
1687  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1688  av_log(NULL, AV_LOG_VERBOSE, "; ");
1689  }
1690 
1691  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1693 
1694  av_log(NULL, AV_LOG_VERBOSE, "\n");
1695  }
1696 
1697  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1698  total_packets, total_size);
1699  }
1700  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1701  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1702  if (pass1_used) {
1703  av_log(NULL, AV_LOG_WARNING, "\n");
1704  } else {
1705  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1706  }
1707  }
1708 }
1709 
1710 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1711 {
1712  AVBPrint buf, buf_script;
1713  OutputStream *ost;
1714  AVFormatContext *oc;
1715  int64_t total_size;
1716  AVCodecContext *enc;
1717  int frame_number, vid, i;
1718  double bitrate;
1719  double speed;
1720  int64_t pts = INT64_MIN + 1;
1721  static int64_t last_time = -1;
1722  static int first_report = 1;
1723  static int qp_histogram[52];
1724  int hours, mins, secs, us;
1725  const char *hours_sign;
1726  int ret;
1727  float t;
1728 
1729  if (!print_stats && !is_last_report && !progress_avio)
1730  return;
1731 
1732  if (!is_last_report) {
1733  if (last_time == -1) {
1734  last_time = cur_time;
1735  }
1736  if (((cur_time - last_time) < stats_period && !first_report) ||
1737  (first_report && nb_output_dumped < nb_output_files))
1738  return;
1739  last_time = cur_time;
1740  }
1741 
1742  t = (cur_time-timer_start) / 1000000.0;
1743 
1744 
1745  oc = output_files[0]->ctx;
1746 
1747  total_size = avio_size(oc->pb);
1748  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1749  total_size = avio_tell(oc->pb);
1750 
1751  vid = 0;
1753  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1754  for (i = 0; i < nb_output_streams; i++) {
1755  float q = -1;
1756  ost = output_streams[i];
1757  enc = ost->enc_ctx;
1758  if (!ost->stream_copy)
1759  q = ost->quality / (float) FF_QP2LAMBDA;
1760 
1761  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1762  av_bprintf(&buf, "q=%2.1f ", q);
1763  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1764  ost->file_index, ost->index, q);
1765  }
1766  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1767  float fps;
1768 
1769  frame_number = ost->frame_number;
1770  fps = t > 1 ? frame_number / t : 0;
1771  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1772  frame_number, fps < 9.95, fps, q);
1773  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1774  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1775  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1776  ost->file_index, ost->index, q);
1777  if (is_last_report)
1778  av_bprintf(&buf, "L");
1779  if (qp_hist) {
1780  int j;
1781  int qp = lrintf(q);
1782  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1783  qp_histogram[qp]++;
1784  for (j = 0; j < 32; j++)
1785  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1786  }
1787 
1788  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1789  int j;
1790  double error, error_sum = 0;
1791  double scale, scale_sum = 0;
1792  double p;
1793  char type[3] = { 'Y','U','V' };
1794  av_bprintf(&buf, "PSNR=");
1795  for (j = 0; j < 3; j++) {
1796  if (is_last_report) {
1797  error = enc->error[j];
1798  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1799  } else {
1800  error = ost->error[j];
1801  scale = enc->width * enc->height * 255.0 * 255.0;
1802  }
1803  if (j)
1804  scale /= 4;
1805  error_sum += error;
1806  scale_sum += scale;
1807  p = psnr(error / scale);
1808  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1809  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1810  ost->file_index, ost->index, type[j] | 32, p);
1811  }
1812  p = psnr(error_sum / scale_sum);
1813  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1814  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1815  ost->file_index, ost->index, p);
1816  }
1817  vid = 1;
1818  }
1819  /* compute min output value */
1823  if (copy_ts) {
1824  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1828  }
1829  }
1830 
1831  if (is_last_report)
1833  }
1834 
1835  secs = FFABS(pts) / AV_TIME_BASE;
1836  us = FFABS(pts) % AV_TIME_BASE;
1837  mins = secs / 60;
1838  secs %= 60;
1839  hours = mins / 60;
1840  mins %= 60;
1841  hours_sign = (pts < 0) ? "-" : "";
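 /* pts is in AV_TIME_BASE (microsecond) units; e.g. pts = 3723450000
    decomposes into hours=1, mins=2, secs=3, us=450000 and is printed
    below as "01:02:03.45" */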
1842 
1843  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1844  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
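 /* total_size * 8 bits divided by pts / 1000 milliseconds yields kbit/s
    directly, and speed is output seconds produced per second of wall-clock
    time t */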
1845 
1846  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1847  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1848  if (pts == AV_NOPTS_VALUE) {
1849  av_bprintf(&buf, "N/A ");
1850  } else {
1851  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1852  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1853  }
1854 
1855  if (bitrate < 0) {
1856  av_bprintf(&buf, "bitrate=N/A");
1857  av_bprintf(&buf_script, "bitrate=N/A\n");
1858  }else{
1859  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1860  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1861  }
1862 
1863  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1864  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1865  if (pts == AV_NOPTS_VALUE) {
1866  av_bprintf(&buf_script, "out_time_us=N/A\n");
1867  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1868  av_bprintf(&buf_script, "out_time=N/A\n");
1869  } else {
1870  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1871  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1872  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1873  hours_sign, hours, mins, secs, us);
1874  }
1875 
1877  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1878  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1879  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1880 
1881  if (speed < 0) {
1882  av_bprintf(&buf, " speed=N/A");
1883  av_bprintf(&buf_script, "speed=N/A\n");
1884  } else {
1885  av_bprintf(&buf, " speed=%4.3gx", speed);
1886  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1887  }
1888 
1889  if (print_stats || is_last_report) {
1890  const char end = is_last_report ? '\n' : '\r';
1891  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1892  fprintf(stderr, "%s %c", buf.str, end);
1893  } else
1894  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1895 
1896  fflush(stderr);
1897  }
1898  av_bprint_finalize(&buf, NULL);
1899 
1900  if (progress_avio) {
1901  av_bprintf(&buf_script, "progress=%s\n",
1902  is_last_report ? "end" : "continue");
1903  avio_write(progress_avio, buf_script.str,
1904  FFMIN(buf_script.len, buf_script.size - 1));
1906  av_bprint_finalize(&buf_script, NULL);
1907  if (is_last_report) {
1908  if ((ret = avio_closep(&progress_avio)) < 0)
1910  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1911  }
1912  }
1913 
1914  first_report = 0;
1915 
1916  if (is_last_report)
1917  print_final_stats(total_size);
1918 }
1919 
1921 {
1922  // We never got any input. Set a fake format, which will
1923  // come from libavformat.
1924  ifilter->format = par->format;
1925  ifilter->sample_rate = par->sample_rate;
1926  ifilter->channels = par->channels;
1927  ifilter->channel_layout = par->channel_layout;
1928  ifilter->width = par->width;
1929  ifilter->height = par->height;
1930  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1931 }
1932 
1933 static void flush_encoders(void)
1934 {
1935  int i, ret;
1936 
1937  for (i = 0; i < nb_output_streams; i++) {
1939  AVCodecContext *enc = ost->enc_ctx;
1941 
1942  if (!ost->encoding_needed)
1943  continue;
1944 
1945  // Try to enable encoding with no input frames.
1946  // Maybe we should just let encoding fail instead.
1947  if (!ost->initialized) {
1948  FilterGraph *fg = ost->filter->graph;
1949 
1951  "Finishing stream %d:%d without any data written to it.\n",
1952  ost->file_index, ost->st->index);
1953 
1954  if (ost->filter && !fg->graph) {
1955  int x;
1956  for (x = 0; x < fg->nb_inputs; x++) {
1957  InputFilter *ifilter = fg->inputs[x];
1958  if (ifilter->format < 0)
1959  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1960  }
1961 
1963  continue;
1964 
1965  ret = configure_filtergraph(fg);
1966  if (ret < 0) {
1967  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1968  exit_program(1);
1969  }
1970 
1972  }
1973 
1975  }
1976 
1978  continue;
1979 
1980  for (;;) {
1981  const char *desc = NULL;
1982  AVPacket *pkt = ost->pkt;
1983  int pkt_size;
1984 
1985  switch (enc->codec_type) {
1986  case AVMEDIA_TYPE_AUDIO:
1987  desc = "audio";
1988  break;
1989  case AVMEDIA_TYPE_VIDEO:
1990  desc = "video";
1991  break;
1992  default:
1993  av_assert0(0);
1994  }
1995 
1997 
1999  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
2000  ret = avcodec_send_frame(enc, NULL);
2001  if (ret < 0) {
2002  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2003  desc,
2004  av_err2str(ret));
2005  exit_program(1);
2006  }
2007  }
2008 
2009  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2010  if (ret < 0 && ret != AVERROR_EOF) {
2011  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2012  desc,
2013  av_err2str(ret));
2014  exit_program(1);
2015  }
2016  if (ost->logfile && enc->stats_out) {
2017  fprintf(ost->logfile, "%s", enc->stats_out);
2018  }
2019  if (ret == AVERROR_EOF) {
2020  output_packet(of, pkt, ost, 1);
2021  break;
2022  }
2023  if (ost->finished & MUXER_FINISHED) {
2025  continue;
2026  }
2028  pkt_size = pkt->size;
2029  output_packet(of, pkt, ost, 0);
2031  do_video_stats(ost, pkt_size);
2032  }
2033  }
2034  }
2035 }
2036 
2037 /*
2038  * Check whether a packet from ist should be written into ost at this time
2039  */
2041 {
2043  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2044 
2045  if (ost->source_index != ist_index)
2046  return 0;
2047 
2048  if (ost->finished)
2049  return 0;
2050 
2051  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2052  return 0;
2053 
2054  return 1;
2055 }
2056 
2058 {
2060  InputFile *f = input_files [ist->file_index];
2061  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2062  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2063  AVPacket *opkt = ost->pkt;
2064 
2065  av_packet_unref(opkt);
2066  // EOF: flush output bitstream filters.
2067  if (!pkt) {
2068  output_packet(of, opkt, ost, 1);
2069  return;
2070  }
2071 
2072  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2074  return;
2075 
2076  if (!ost->frame_number && !ost->copy_prior_start) {
2077  int64_t comp_start = start_time;
2078  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2079  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2080  if (pkt->pts == AV_NOPTS_VALUE ?
2081  ist->pts < comp_start :
2082  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2083  return;
2084  }
2085 
2086  if (of->recording_time != INT64_MAX &&
2087  ist->pts >= of->recording_time + start_time) {
2089  return;
2090  }
2091 
2092  if (f->recording_time != INT64_MAX) {
2093  start_time = 0;
2094  if (copy_ts) {
2095  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2096  start_time += start_at_zero ? 0 : f->ctx->start_time;
2097  }
2098  if (ist->pts >= f->recording_time + start_time) {
2100  return;
2101  }
2102  }
2103 
2104  /* force the input stream PTS */
2106  ost->sync_opts++;
2107 
2108  if (av_packet_ref(opkt, pkt) < 0)
2109  exit_program(1);
2110 
2111  if (pkt->pts != AV_NOPTS_VALUE)
2112  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2113 
2114  if (pkt->dts == AV_NOPTS_VALUE) {
2116  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2117  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2118  if(!duration)
2119  duration = ist->dec_ctx->frame_size;
2120  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2121  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2122  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
2123  /* dts will be set immediately afterwards to what pts is now */
2124  opkt->pts = opkt->dts - ost_tb_start_time;
2125  } else
2126  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2127  opkt->dts -= ost_tb_start_time;
2128 
2129  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2130 
2131  output_packet(of, opkt, ost, 0);
2132 }
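/*
 * Illustrative sketch, not part of ffmpeg.c: the timestamp handling in
 * do_streamcopy() above amounts to rescaling pts/dts/duration from the input
 * stream time base into the muxer time base. av_packet_rescale_ts() bundles
 * the same av_rescale_q() conversions; the function name and the in_tb/out_tb
 * parameter names are placeholders used only for this example.
 */
static void rescale_copied_packet_example(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    /* Converts pkt->pts, pkt->dts and pkt->duration from in_tb to out_tb,
     * leaving AV_NOPTS_VALUE fields untouched. */
    av_packet_rescale_ts(pkt, in_tb, out_tb);
}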
2133 
2134 int guess_input_channel_layout(InputStream *ist)
2135 {
2136  AVCodecContext *dec = ist->dec_ctx;
2137 
2138  if (!dec->channel_layout) {
2139  char layout_name[256];
2140 
2141  if (dec->channels > ist->guess_layout_max)
2142  return 0;
2143  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2144  if (!dec->channel_layout)
2145  return 0;
2146  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2147  dec->channels, dec->channel_layout);
2148  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2149  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2150  }
2151  return 1;
2152 }
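/*
 * Illustrative sketch, not part of ffmpeg.c: what the legacy channel-layout
 * helpers used in guess_input_channel_layout() return for a plain channel
 * count. The function name is hypothetical.
 */
static void print_default_layout_example(int channels)
{
    char name[256];
    int64_t layout = av_get_default_channel_layout(channels); /* 0 if no default exists */

    av_get_channel_layout_string(name, sizeof(name), channels, layout);
    av_log(NULL, AV_LOG_INFO, "%d channels -> default layout %s\n", channels, name);
}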
2153 
2154 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2155 {
2156  if (*got_output || ret<0)
2157  decode_error_stat[ret<0] ++;
2158 
2159  if (ret < 0 && exit_on_error)
2160  exit_program(1);
2161 
2162  if (*got_output && ist) {
2163  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2164  av_log(NULL, AV_LOG_WARNING,
2165  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2166  if (exit_on_error)
2167  exit_program(1);
2168  }
2169  }
2170 }
2171 
2172 // Filters can be configured only if the formats of all inputs are known.
2173 static int ifilter_has_all_input_formats(FilterGraph *fg)
2174 {
2175  int i;
2176  for (i = 0; i < fg->nb_inputs; i++) {
2177  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2178  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2179  return 0;
2180  }
2181  return 1;
2182 }
2183 
2184 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
2185 {
2186  FilterGraph *fg = ifilter->graph;
2187  AVFrameSideData *sd;
2188  int need_reinit, ret, i;
2189 
2190  /* determine if the parameters for this input changed */
2191  need_reinit = ifilter->format != frame->format;
2192 
2193  switch (ifilter->ist->st->codecpar->codec_type) {
2194  case AVMEDIA_TYPE_AUDIO:
2195  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2196  ifilter->channels != frame->channels ||
2197  ifilter->channel_layout != frame->channel_layout;
2198  break;
2199  case AVMEDIA_TYPE_VIDEO:
2200  need_reinit |= ifilter->width != frame->width ||
2201  ifilter->height != frame->height;
2202  break;
2203  }
2204 
2205  if (!ifilter->ist->reinit_filters && fg->graph)
2206  need_reinit = 0;
2207 
2208  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2209  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2210  need_reinit = 1;
2211 
2212  if ((sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX))) {
2213  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2214  need_reinit = 1;
2215  } else if (ifilter->displaymatrix)
2216  need_reinit = 1;
2217 
2218  if (need_reinit) {
2219  ret = ifilter_parameters_from_frame(ifilter, frame);
2220  if (ret < 0)
2221  return ret;
2222  }
2223 
2224  /* (re)init the graph if possible, otherwise buffer the frame and return */
2225  if (need_reinit || !fg->graph) {
2226  for (i = 0; i < fg->nb_inputs; i++) {
2227  if (!ifilter_has_all_input_formats(fg)) {
2228  AVFrame *tmp = av_frame_clone(frame);
2229  if (!tmp)
2230  return AVERROR(ENOMEM);
2232 
2233  if (!av_fifo_space(ifilter->frame_queue)) {
2234  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2235  if (ret < 0) {
2236  av_frame_free(&tmp);
2237  return ret;
2238  }
2239  }
2240  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2241  return 0;
2242  }
2243  }
2244 
2245  ret = reap_filters(1);
2246  if (ret < 0 && ret != AVERROR_EOF) {
2247  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2248  return ret;
2249  }
2250 
2251  ret = configure_filtergraph(fg);
2252  if (ret < 0) {
2253  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2254  return ret;
2255  }
2256  }
2257 
2259  if (ret < 0) {
2260  if (ret != AVERROR_EOF)
2261  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2262  return ret;
2263  }
2264 
2265  return 0;
2266 }
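/*
 * Illustrative sketch, not part of ffmpeg.c: the frame-buffering pattern used
 * above when the filtergraph cannot be configured yet - clone the frame and
 * grow an AVFifoBuffer of AVFrame pointers on demand. "queue" stands in for
 * ifilter->frame_queue; the function name is hypothetical.
 */
static int queue_frame_copy_example(AVFifoBuffer *queue, const AVFrame *frame)
{
    AVFrame *tmp = av_frame_clone(frame);
    int ret;

    if (!tmp)
        return AVERROR(ENOMEM);
    if (!av_fifo_space(queue)) {
        /* double the fifo size, as ifilter_send_frame() does */
        ret = av_fifo_realloc2(queue, 2 * av_fifo_size(queue));
        if (ret < 0) {
            av_frame_free(&tmp);
            return ret;
        }
    }
    av_fifo_generic_write(queue, &tmp, sizeof(tmp), NULL);
    return 0;
}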
2267 
2268 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2269 {
2270  int ret;
2271 
2272  ifilter->eof = 1;
2273 
2274  if (ifilter->filter) {
2275  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2276  if (ret < 0)
2277  return ret;
2278  } else {
2279  // the filtergraph was never configured
2280  if (ifilter->format < 0)
2281  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2282  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2283  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2284  return AVERROR_INVALIDDATA;
2285  }
2286  }
2287 
2288  return 0;
2289 }
2290 
2291 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2292 // There is the following difference: if you got a frame, you must call
2293 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2294 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2295 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2296 {
2297  int ret;
2298 
2299  *got_frame = 0;
2300 
2301  if (pkt) {
2302  ret = avcodec_send_packet(avctx, pkt);
2303  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2304  // decoded frames with avcodec_receive_frame() until done.
2305  if (ret < 0 && ret != AVERROR_EOF)
2306  return ret;
2307  }
2308 
2309  ret = avcodec_receive_frame(avctx, frame);
2310  if (ret < 0 && ret != AVERROR(EAGAIN))
2311  return ret;
2312  if (ret >= 0)
2313  *got_frame = 1;
2314 
2315  return 0;
2316 }
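/*
 * Illustrative sketch, not part of ffmpeg.c: how a caller drives the decode()
 * wrapper above. After a packet produced a frame, decode() must be called again
 * with pkt == NULL until got_frame stays 0, mirroring the
 * avcodec_send_packet()/avcodec_receive_frame() pair it wraps. "consume_frame"
 * is a hypothetical callback standing in for whatever the caller does with each
 * decoded frame.
 */
static int decode_all_example(AVCodecContext *avctx, AVFrame *frame, AVPacket *pkt,
                              int (*consume_frame)(AVFrame *frame))
{
    int got_frame, ret;

    ret = decode(avctx, frame, &got_frame, pkt);
    while (ret >= 0 && got_frame) {
        ret = consume_frame(frame);
        if (ret < 0)
            break;
        ret = decode(avctx, frame, &got_frame, NULL); /* pkt == NULL: fetch more output */
    }
    return ret;
}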
2317 
2318 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2319 {
2320  int i, ret;
2321  AVFrame *f;
2322 
2323  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2324  for (i = 0; i < ist->nb_filters; i++) {
2325  if (i < ist->nb_filters - 1) {
2326  f = ist->filter_frame;
2327  ret = av_frame_ref(f, decoded_frame);
2328  if (ret < 0)
2329  break;
2330  } else
2331  f = decoded_frame;
2332  ret = ifilter_send_frame(ist->filters[i], f);
2333  if (ret == AVERROR_EOF)
2334  ret = 0; /* ignore */
2335  if (ret < 0) {
2336  av_log(NULL, AV_LOG_ERROR,
2337  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2338  break;
2339  }
2340  }
2341  return ret;
2342 }
2343 
2344 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2345  int *decode_failed)
2346 {
2347  AVFrame *decoded_frame;
2348  AVCodecContext *avctx = ist->dec_ctx;
2349  int ret, err = 0;
2350  AVRational decoded_frame_tb;
2351 
2352  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2353  return AVERROR(ENOMEM);
2354  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2355  return AVERROR(ENOMEM);
2356  decoded_frame = ist->decoded_frame;
2357 
2359  ret = decode(avctx, decoded_frame, got_output, pkt);
2360  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2361  if (ret < 0)
2362  *decode_failed = 1;
2363 
2364  if (ret >= 0 && avctx->sample_rate <= 0) {
2365  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2366  ret = AVERROR_INVALIDDATA;
2367  }
2368 
2369  if (ret != AVERROR_EOF)
2370  check_decode_result(ist, got_output, ret);
2371
2372  if (!*got_output || ret < 0)
2373  return ret;
2374 
2375  ist->samples_decoded += decoded_frame->nb_samples;
2376  ist->frames_decoded++;
2377 
2378  /* increment next_dts to use for the case where the input stream does not
2379  have timestamps or there are multiple frames in the packet */
2380  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2381  avctx->sample_rate;
2382  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2383  avctx->sample_rate;
2384 
2385  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2386  decoded_frame_tb = ist->st->time_base;
2387  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2388  decoded_frame->pts = pkt->pts;
2389  decoded_frame_tb = ist->st->time_base;
2390  }else {
2391  decoded_frame->pts = ist->dts;
2392  decoded_frame_tb = AV_TIME_BASE_Q;
2393  }
2394  if (decoded_frame->pts != AV_NOPTS_VALUE)
2395  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2396  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2397  (AVRational){1, avctx->sample_rate});
2398  ist->nb_samples = decoded_frame->nb_samples;
2399  err = send_frame_to_filters(ist, decoded_frame);
2400
2401  av_frame_unref(ist->filter_frame);
2402  av_frame_unref(decoded_frame);
2403  return err < 0 ? err : ret;
2404 }
2405 
2406 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2407  int *decode_failed)
2408 {
2409  AVFrame *decoded_frame;
2410  int i, ret = 0, err = 0;
2411  int64_t best_effort_timestamp;
2412  int64_t dts = AV_NOPTS_VALUE;
2413 
2414  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2415  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2416  // skip the packet.
2417  if (!eof && pkt && pkt->size == 0)
2418  return 0;
2419 
2420  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2421  return AVERROR(ENOMEM);
2422  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2423  return AVERROR(ENOMEM);
2424  decoded_frame = ist->decoded_frame;
2425  if (ist->dts != AV_NOPTS_VALUE)
2426  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2427  if (pkt) {
2428  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2429  }
2430 
2431  // The old code used to set dts on the drain packet, which does not work
2432  // with the new API anymore.
2433  if (eof) {
2434  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2435  if (!new)
2436  return AVERROR(ENOMEM);
2437  ist->dts_buffer = new;
2438  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2439  }
2440 
2442  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2443  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2444  if (ret < 0)
2445  *decode_failed = 1;
2446 
2447  // The following line may be required in some cases where there is no parser
2448  // or the parser does not set has_b_frames correctly
2449  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2450  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2451  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2452  } else
2453  av_log(ist->dec_ctx, AV_LOG_WARNING,
2454  "video_delay is larger in decoder than demuxer %d > %d.\n"
2455  "If you want to help, upload a sample "
2456  "of this file to https://streams.videolan.org/upload/ "
2457  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2458  ist->dec_ctx->has_b_frames,
2459  ist->st->codecpar->video_delay);
2460  }
2461 
2462  if (ret != AVERROR_EOF)
2463  check_decode_result(ist, got_output, ret);
2464
2465  if (*got_output && ret >= 0) {
2466  if (ist->dec_ctx->width != decoded_frame->width ||
2467  ist->dec_ctx->height != decoded_frame->height ||
2468  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2469  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2470  decoded_frame->width,
2471  decoded_frame->height,
2472  decoded_frame->format,
2473  ist->dec_ctx->width,
2474  ist->dec_ctx->height,
2475  ist->dec_ctx->pix_fmt);
2476  }
2477  }
2478 
2479  if (!*got_output || ret < 0)
2480  return ret;
2481 
2482  if(ist->top_field_first>=0)
2483  decoded_frame->top_field_first = ist->top_field_first;
2484 
2485  ist->frames_decoded++;
2486 
2487  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2488  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2489  if (err < 0)
2490  goto fail;
2491  }
2492  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2493 
2494  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2495  *duration_pts = decoded_frame->pkt_duration;
2496 
2497  if (ist->framerate.num)
2498  best_effort_timestamp = ist->cfr_next_pts++;
2499 
2500  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2501  best_effort_timestamp = ist->dts_buffer[0];
2502 
2503  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2504  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2505  ist->nb_dts_buffer--;
2506  }
2507 
2508  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2509  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2510 
2511  if (ts != AV_NOPTS_VALUE)
2512  ist->next_pts = ist->pts = ts;
2513  }
2514 
2515  if (debug_ts) {
2516  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2517  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2518  ist->st->index, av_ts2str(decoded_frame->pts),
2519  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2520  best_effort_timestamp,
2521  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2523  ist->st->time_base.num, ist->st->time_base.den);
2524  }
2525 
2526  if (ist->st->sample_aspect_ratio.num)
2527  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2528 
2529  err = send_frame_to_filters(ist, decoded_frame);
2530
2531 fail:
2532  av_frame_unref(ist->filter_frame);
2533  av_frame_unref(decoded_frame);
2534  return err < 0 ? err : ret;
2535 }
2536 
2537 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2538  int *decode_failed)
2539 {
2540  AVSubtitle subtitle;
2541  int free_sub = 1;
2542  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2543  &subtitle, got_output, pkt);
2544 
2546 
2547  if (ret < 0 || !*got_output) {
2548  *decode_failed = 1;
2549  if (!pkt->size)
2551  return ret;
2552  }
2553 
2554  if (ist->fix_sub_duration) {
2555  int end = 1;
2556  if (ist->prev_sub.got_output) {
2557  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2558  1000, AV_TIME_BASE);
2559  if (end < ist->prev_sub.subtitle.end_display_time) {
2560  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2561  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2562  ist->prev_sub.subtitle.end_display_time, end,
2563  end <= 0 ? ", dropping it" : "");
2564  ist->prev_sub.subtitle.end_display_time = end;
2565  }
2566  }
2567  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2568  FFSWAP(int, ret, ist->prev_sub.ret);
2569  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2570  if (end <= 0)
2571  goto out;
2572  }
2573 
2574  if (!*got_output)
2575  return ret;
2576 
2577  if (ist->sub2video.frame) {
2578  sub2video_update(ist, INT64_MIN, &subtitle);
2579  } else if (ist->nb_filters) {
2580  if (!ist->sub2video.sub_queue)
2581  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2582  if (!ist->sub2video.sub_queue)
2583  exit_program(1);
2584  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2585  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2586  if (ret < 0)
2587  exit_program(1);
2588  }
2589  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2590  free_sub = 0;
2591  }
2592 
2593  if (!subtitle.num_rects)
2594  goto out;
2595 
2596  ist->frames_decoded++;
2597 
2598  for (i = 0; i < nb_output_streams; i++) {
2599  OutputStream *ost = output_streams[i];
2600
2601  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2602  exit_program(1);
2603  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2604  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2605  continue;
2606 
2608  }
2609 
2610 out:
2611  if (free_sub)
2612  avsubtitle_free(&subtitle);
2613  return ret;
2614 }
2615 
2616 static int send_filter_eof(InputStream *ist)
2617 {
2618  int i, ret;
2619  /* TODO keep pts also in stream time base to avoid converting back */
2620  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2621  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2622
2623  for (i = 0; i < ist->nb_filters; i++) {
2624  ret = ifilter_send_eof(ist->filters[i], pts);
2625  if (ret < 0)
2626  return ret;
2627  }
2628  return 0;
2629 }
2630 
2631 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2632 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2633 {
2634  int ret = 0, i;
2635  int repeating = 0;
2636  int eof_reached = 0;
2637 
2638  AVPacket *avpkt;
2639 
2640  if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
2641  return AVERROR(ENOMEM);
2642  avpkt = ist->pkt;
2643 
2644  if (!ist->saw_first_ts) {
2645  ist->first_dts =
2646  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2647  ist->pts = 0;
2648  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2649  ist->first_dts =
2650  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2651  ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2652  }
2653  ist->saw_first_ts = 1;
2654  }
2655 
2656  if (ist->next_dts == AV_NOPTS_VALUE)
2657  ist->next_dts = ist->dts;
2658  if (ist->next_pts == AV_NOPTS_VALUE)
2659  ist->next_pts = ist->pts;
2660 
2661  if (pkt) {
2662  av_packet_unref(avpkt);
2663  ret = av_packet_ref(avpkt, pkt);
2664  if (ret < 0)
2665  return ret;
2666  }
2667 
2668  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2669  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2670  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2671  ist->next_pts = ist->pts = ist->dts;
2672  }
2673 
2674  // while we have more to decode or while the decoder did output something on EOF
2675  while (ist->decoding_needed) {
2676  int64_t duration_dts = 0;
2677  int64_t duration_pts = 0;
2678  int got_output = 0;
2679  int decode_failed = 0;
2680 
2681  ist->pts = ist->next_pts;
2682  ist->dts = ist->next_dts;
2683 
2684  switch (ist->dec_ctx->codec_type) {
2685  case AVMEDIA_TYPE_AUDIO:
2686  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2687  &decode_failed);
2688  av_packet_unref(avpkt);
2689  break;
2690  case AVMEDIA_TYPE_VIDEO:
2691  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2692  &decode_failed);
2693  if (!repeating || !pkt || got_output) {
2694  if (pkt && pkt->duration) {
2695  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2696  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2697  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2698  duration_dts = ((int64_t)AV_TIME_BASE *
2699  ist->dec_ctx->framerate.den * ticks) /
2700  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2701  }
2702 
2703  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2704  ist->next_dts += duration_dts;
2705  }else
2706  ist->next_dts = AV_NOPTS_VALUE;
2707  }
2708 
2709  if (got_output) {
2710  if (duration_pts > 0) {
2711  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2712  } else {
2713  ist->next_pts += duration_dts;
2714  }
2715  }
2716  av_packet_unref(avpkt);
2717  break;
2718  case AVMEDIA_TYPE_SUBTITLE:
2719  if (repeating)
2720  break;
2721  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2722  if (!pkt && ret >= 0)
2723  ret = AVERROR_EOF;
2724  av_packet_unref(avpkt);
2725  break;
2726  default:
2727  return -1;
2728  }
2729 
2730  if (ret == AVERROR_EOF) {
2731  eof_reached = 1;
2732  break;
2733  }
2734 
2735  if (ret < 0) {
2736  if (decode_failed) {
2737  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2738  ist->file_index, ist->st->index, av_err2str(ret));
2739  } else {
2740  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2741  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2742  }
2743  if (!decode_failed || exit_on_error)
2744  exit_program(1);
2745  break;
2746  }
2747 
2748  if (got_output)
2749  ist->got_output = 1;
2750 
2751  if (!got_output)
2752  break;
2753 
2754  // During draining, we might get multiple output frames in this loop.
2755  // ffmpeg.c does not drain the filter chain on configuration changes,
2756  // which means if we send multiple frames at once to the filters, and
2757  // one of those frames changes configuration, the buffered frames will
2758  // be lost. This can upset certain FATE tests.
2759  // Decode only 1 frame per call on EOF to appease these FATE tests.
2760  // The ideal solution would be to rewrite decoding to use the new
2761  // decoding API in a better way.
2762  if (!pkt)
2763  break;
2764 
2765  repeating = 1;
2766  }
2767 
2768  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2769  /* except when looping we need to flush but not to send an EOF */
2770  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2771  int ret = send_filter_eof(ist);
2772  if (ret < 0) {
2773  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2774  exit_program(1);
2775  }
2776  }
2777 
2778  /* handle stream copy */
2779  if (!ist->decoding_needed && pkt) {
2780  ist->dts = ist->next_dts;
2781  switch (ist->dec_ctx->codec_type) {
2782  case AVMEDIA_TYPE_AUDIO:
2783  av_assert1(pkt->duration >= 0);
2784  if (ist->dec_ctx->sample_rate) {
2785  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2786  ist->dec_ctx->sample_rate;
2787  } else {
2788  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2789  }
2790  break;
2791  case AVMEDIA_TYPE_VIDEO:
2792  if (ist->framerate.num) {
2793  // TODO: Remove work-around for c99-to-c89 issue 7
2794  AVRational time_base_q = AV_TIME_BASE_Q;
2795  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2796  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2797  } else if (pkt->duration) {
2798  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2799  } else if(ist->dec_ctx->framerate.num != 0) {
2800  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2801  ist->next_dts += ((int64_t)AV_TIME_BASE *
2802  ist->dec_ctx->framerate.den * ticks) /
2803  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2804  }
2805  break;
2806  }
2807  ist->pts = ist->dts;
2808  ist->next_pts = ist->next_dts;
2809  }
2810  for (i = 0; i < nb_output_streams; i++) {
2811  OutputStream *ost = output_streams[i];
2812
2813  if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
2814  exit_program(1);
2815  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2816  continue;
2817 
2818  do_streamcopy(ist, ost, pkt);
2819  }
2820 
2821  return !eof_reached;
2822 }
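/*
 * Illustrative sketch, not part of ffmpeg.c: the overall shape of the demux
 * loop that feeds process_input_packet(). It assumes "streams" is an array of
 * InputStream pointers parallel to ic->streams, and it ignores the readrate,
 * seeking and error-recovery logic the real process_input() performs. The
 * function name is hypothetical.
 */
static int demux_loop_example(AVFormatContext *ic, InputStream **streams, int nb_streams)
{
    AVPacket *pkt = av_packet_alloc();
    int i, ret;

    if (!pkt)
        return AVERROR(ENOMEM);
    while ((ret = av_read_frame(ic, pkt)) >= 0) {
        process_input_packet(streams[pkt->stream_index], pkt, 0);
        av_packet_unref(pkt);
    }
    /* on EOF, flush every decoder by passing pkt == NULL */
    for (i = 0; i < nb_streams; i++)
        process_input_packet(streams[i], NULL, 0);
    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 0 : ret;
}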
2823 
2824 static void print_sdp(void)
2825 {
2826  char sdp[16384];
2827  int i;
2828  int j;
2829  AVIOContext *sdp_pb;
2830  AVFormatContext **avc;
2831 
2832  for (i = 0; i < nb_output_files; i++) {
2833  if (!output_files[i]->header_written)
2834  return;
2835  }
2836 
2837  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2838  if (!avc)
2839  exit_program(1);
2840  for (i = 0, j = 0; i < nb_output_files; i++) {
2841  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2842  avc[j] = output_files[i]->ctx;
2843  j++;
2844  }
2845  }
2846 
2847  if (!j)
2848  goto fail;
2849 
2850  av_sdp_create(avc, j, sdp, sizeof(sdp));
2851 
2852  if (!sdp_filename) {
2853  printf("SDP:\n%s\n", sdp);
2854  fflush(stdout);
2855  } else {
2856  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2857  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2858  } else {
2859  avio_print(sdp_pb, sdp);
2860  avio_closep(&sdp_pb);
2862  }
2863  }
2864 
2865 fail:
2866  av_freep(&avc);
2867 }
2868 
2869 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2870 {
2871  InputStream *ist = s->opaque;
2872  const enum AVPixelFormat *p;
2873  int ret;
2874 
2875  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2876  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2877  const AVCodecHWConfig *config = NULL;
2878  int i;
2879 
2880  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2881  break;
2882 
2883  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2884  ist->hwaccel_id == HWACCEL_AUTO) {
2885  for (i = 0;; i++) {
2886  config = avcodec_get_hw_config(s->codec, i);
2887  if (!config)
2888  break;
2889  if (!(config->methods &
2890  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
2891  continue;
2892  if (config->pix_fmt == *p)
2893  break;
2894  }
2895  }
2896  if (config) {
2897  if (config->device_type != ist->hwaccel_device_type) {
2898  // Different hwaccel offered, ignore.
2899  continue;
2900  }
2901 
2903  if (ret < 0) {
2904  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2905  av_log(NULL, AV_LOG_FATAL,
2906  "%s hwaccel requested for input stream #%d:%d, "
2907  "but cannot be initialized.\n",
2908  av_hwdevice_get_type_name(config->device_type),
2909  ist->file_index, ist->st->index);
2910  return AV_PIX_FMT_NONE;
2911  }
2912  continue;
2913  }
2914  } else {
2915  const HWAccel *hwaccel = NULL;
2916  int i;
2917  for (i = 0; hwaccels[i].name; i++) {
2918  if (hwaccels[i].pix_fmt == *p) {
2919  hwaccel = &hwaccels[i];
2920  break;
2921  }
2922  }
2923  if (!hwaccel) {
2924  // No hwaccel supporting this pixfmt.
2925  continue;
2926  }
2927  if (hwaccel->id != ist->hwaccel_id) {
2928  // Does not match requested hwaccel.
2929  continue;
2930  }
2931 
2932  ret = hwaccel->init(s);
2933  if (ret < 0) {
2934  av_log(NULL, AV_LOG_FATAL,
2935  "%s hwaccel requested for input stream #%d:%d, "
2936  "but cannot be initialized.\n", hwaccel->name,
2937  ist->file_index, ist->st->index);
2938  return AV_PIX_FMT_NONE;
2939  }
2940  }
2941 
2942  if (ist->hw_frames_ctx) {
2943  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2944  if (!s->hw_frames_ctx)
2945  return AV_PIX_FMT_NONE;
2946  }
2947 
2948  ist->hwaccel_pix_fmt = *p;
2949  break;
2950  }
2951 
2952  return *p;
2953 }
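/*
 * Illustrative sketch, not part of ffmpeg.c: enumerating the hardware
 * configurations a decoder advertises, the same avcodec_get_hw_config()
 * probing that get_format() above relies on. The function name is
 * hypothetical.
 */
static void list_hw_configs_example(const AVCodec *codec)
{
    int i;

    for (i = 0;; i++) {
        const AVCodecHWConfig *config = avcodec_get_hw_config(codec, i);
        if (!config)
            break;
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX)
            av_log(NULL, AV_LOG_INFO, "%s: %s via device context, pix_fmt %s\n",
                   codec->name,
                   av_hwdevice_get_type_name(config->device_type),
                   av_get_pix_fmt_name(config->pix_fmt));
    }
}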
2954 
2955 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2956 {
2957  InputStream *ist = s->opaque;
2958 
2959  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2960  return ist->hwaccel_get_buffer(s, frame, flags);
2961 
2962  return avcodec_default_get_buffer2(s, frame, flags);
2963 }
2964 
2965 static int init_input_stream(int ist_index, char *error, int error_len)
2966 {
2967  int ret;
2968  InputStream *ist = input_streams[ist_index];
2969 
2970  if (ist->decoding_needed) {
2971  const AVCodec *codec = ist->dec;
2972  if (!codec) {
2973  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2974  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2975  return AVERROR(EINVAL);
2976  }
2977 
2978  ist->dec_ctx->opaque = ist;
2979  ist->dec_ctx->get_format = get_format;
2980  ist->dec_ctx->get_buffer2 = get_buffer;
2981 #if LIBAVCODEC_VERSION_MAJOR < 60
2982 FF_DISABLE_DEPRECATION_WARNINGS
2983  ist->dec_ctx->thread_safe_callbacks = 1;
2984 FF_ENABLE_DEPRECATION_WARNINGS
2985 #endif
2986 
2987  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2988  (ist->decoding_needed & DECODING_FOR_OST)) {
2989  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2990  if (ist->decoding_needed & DECODING_FOR_FILTER)
2991  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2992  }
2993 
2994  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2995  * audio, and video decoders such as cuvid or mediacodec */
2996  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2997 
2998  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2999  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
3000  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
3001  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
3002  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
3003 
3004  ret = hw_device_setup_for_decode(ist);
3005  if (ret < 0) {
3006  snprintf(error, error_len, "Device setup failed for "
3007  "decoder on input stream #%d:%d : %s",
3008  ist->file_index, ist->st->index, av_err2str(ret));
3009  return ret;
3010  }
3011 
3012  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
3013  if (ret == AVERROR_EXPERIMENTAL)
3014  abort_codec_experimental(codec, 0);
3015 
3016  snprintf(error, error_len,
3017  "Error while opening decoder for input stream "
3018  "#%d:%d : %s",
3019  ist->file_index, ist->st->index, av_err2str(ret));
3020  return ret;
3021  }
3022  assert_avoptions(ist->decoder_opts);
3023  }
3024 
3025  ist->next_pts = AV_NOPTS_VALUE;
3026  ist->next_dts = AV_NOPTS_VALUE;
3027 
3028  return 0;
3029 }
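/*
 * Illustrative sketch, not part of ffmpeg.c: the core decoder setup performed
 * by init_input_stream(), stripped of the hwaccel, threading and option
 * handling. "st" is assumed to belong to an already opened AVFormatContext;
 * the function name is hypothetical.
 */
static AVCodecContext *open_decoder_example(AVStream *st)
{
    const AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *dec_ctx;

    if (!codec)
        return NULL;
    dec_ctx = avcodec_alloc_context3(codec);
    if (!dec_ctx)
        return NULL;
    if (avcodec_parameters_to_context(dec_ctx, st->codecpar) < 0)
        goto fail;
    dec_ctx->pkt_timebase = st->time_base; /* as above, for timing-sensitive decoders */
    if (avcodec_open2(dec_ctx, codec, NULL) < 0)
        goto fail;
    return dec_ctx;
fail:
    avcodec_free_context(&dec_ctx);
    return NULL;
}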
3030 
3031 static InputStream *get_input_stream(OutputStream *ost)
3032 {
3033  if (ost->source_index >= 0)
3034  return input_streams[ost->source_index];
3035  return NULL;
3036 }
3037 
3038 static int compare_int64(const void *a, const void *b)
3039 {
3040  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
3041 }
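/*
 * Illustrative sketch, not part of ffmpeg.c: compare_int64() is written for
 * qsort()/bsearch(); FFDIFFSIGN avoids the overflow a plain subtraction of two
 * int64_t values could cause. The function name is hypothetical.
 */
static void sort_timestamps_example(int64_t *ts, size_t n)
{
    qsort(ts, n, sizeof(*ts), compare_int64);
}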
3042 
3043 /* open the muxer when all the streams are initialized */
3045 {
3046  int ret, i;
3047 
3048  for (i = 0; i < of->ctx->nb_streams; i++) {
3050  if (!ost->initialized)
3051  return 0;
3052  }
3053 
3054  of->ctx->interrupt_callback = int_cb;
3055 
3056  ret = avformat_write_header(of->ctx, &of->opts);
3057  if (ret < 0) {
3059  "Could not write header for output file #%d "
3060  "(incorrect codec parameters ?): %s\n",
3062  return ret;
3063  }
3064  //assert_avoptions(of->opts);
3065  of->header_written = 1;
3066 
3067  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
3068  nb_output_dumped++;
3069 
3070  if (sdp_filename || want_sdp)
3071  print_sdp();
3072 
3073  /* flush the muxing queues */
3074  for (i = 0; i < of->ctx->nb_streams; i++) {
3076 
3077  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3078  if (!av_fifo_size(ost->muxing_queue))
3080 
3081  while (av_fifo_size(ost->muxing_queue)) {
3082  AVPacket *pkt;
3085  write_packet(of, pkt, ost, 1);
3086  av_packet_free(&pkt);
3087  }
3088  }
3089 
3090  return 0;
3091 }
3092 
3094 {
3096  int ret;
3097 
3098  if (!ctx)
3099  return 0;
3100 
3101  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3102  if (ret < 0)
3103  return ret;
3104 
3105  ctx->time_base_in = ost->st->time_base;
3106 
3107  ret = av_bsf_init(ctx);
3108  if (ret < 0) {
3109  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3110  ctx->filter->name);
3111  return ret;
3112  }
3113 
3114  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3115  if (ret < 0)
3116  return ret;
3117  ost->st->time_base = ctx->time_base_out;
3118 
3119  return 0;
3120 }
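/*
 * Illustrative sketch, not part of ffmpeg.c: how a bitstream filter context
 * like the one initialized above is created in the first place. The filter
 * name "h264_mp4toannexb" and the function name are examples only.
 */
static int alloc_bsf_example(AVBSFContext **bsf, const AVCodecParameters *par, AVRational tb)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    ret = av_bsf_alloc(f, bsf);
    if (ret < 0)
        return ret;
    ret = avcodec_parameters_copy((*bsf)->par_in, par);
    if (ret < 0)
        return ret;
    (*bsf)->time_base_in = tb;
    return av_bsf_init(*bsf);
}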
3121 
3123 {
3126  AVCodecParameters *par_dst = ost->st->codecpar;
3127  AVCodecParameters *par_src = ost->ref_par;
3128  AVRational sar;
3129  int i, ret;
3130  uint32_t codec_tag = par_dst->codec_tag;
3131 
3132  av_assert0(ist && !ost->filter);
3133 
3134  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3135  if (ret >= 0)
3137  if (ret < 0) {
3139  "Error setting up codec context options.\n");
3140  return ret;
3141  }
3142 
3144  if (ret < 0) {
3146  "Error getting reference codec parameters.\n");
3147  return ret;
3148  }
3149 
3150  if (!codec_tag) {
3151  unsigned int codec_tag_tmp;
3152  if (!of->ctx->oformat->codec_tag ||
3153  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3154  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3155  codec_tag = par_src->codec_tag;
3156  }
3157 
3158  ret = avcodec_parameters_copy(par_dst, par_src);
3159  if (ret < 0)
3160  return ret;
3161 
3162  par_dst->codec_tag = codec_tag;
3163 
3164  if (!ost->frame_rate.num)
3165  ost->frame_rate = ist->framerate;
3166 
3167  if (ost->frame_rate.num)
3169  else
3170  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3171 
3173  if (ret < 0)
3174  return ret;
3175 
3176  // copy timebase while removing common factors
3177  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3178  if (ost->frame_rate.num)
3180  else
3182  }
3183 
3184  // copy estimated duration as a hint to the muxer
3185  if (ost->st->duration <= 0 && ist->st->duration > 0)
3186  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3187 
3188  // copy disposition
3189  ost->st->disposition = ist->st->disposition;
3190 
3191  if (ist->st->nb_side_data) {
3192  for (i = 0; i < ist->st->nb_side_data; i++) {
3193  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3194  uint8_t *dst_data;
3195 
3196  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3197  if (!dst_data)
3198  return AVERROR(ENOMEM);
3199  memcpy(dst_data, sd_src->data, sd_src->size);
3200  }
3201  }
3202 
3203  if (ost->rotate_overridden) {
3205  sizeof(int32_t) * 9);
3206  if (sd)
3208  }
3209 
3210  switch (par_dst->codec_type) {
3211  case AVMEDIA_TYPE_AUDIO:
3212  if (audio_volume != 256) {
3213  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3214  exit_program(1);
3215  }
3216  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3217  par_dst->block_align= 0;
3218  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3219  par_dst->block_align= 0;
3220  break;
3221  case AVMEDIA_TYPE_VIDEO:
3222  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3223  sar =
3225  (AVRational){ par_dst->height, par_dst->width });
3226  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3227  "with stream copy may produce invalid files\n");
3228  }
3229  else if (ist->st->sample_aspect_ratio.num)
3230  sar = ist->st->sample_aspect_ratio;
3231  else
3232  sar = par_src->sample_aspect_ratio;
3233  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3234  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3235  ost->st->r_frame_rate = ist->st->r_frame_rate;
3236  break;
3237  }
3238 
3239  ost->mux_timebase = ist->st->time_base;
3240 
3241  return 0;
3242 }
3243 
3245 {
3246  AVDictionaryEntry *e;
3247 
3248  uint8_t *encoder_string;
3249  int encoder_string_len;
3250  int format_flags = 0;
3251  int codec_flags = ost->enc_ctx->flags;
3252 
3253  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3254  return;
3255 
3256  e = av_dict_get(of->opts, "fflags", NULL, 0);
3257  if (e) {
3258  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3259  if (!o)
3260  return;
3261  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3262  }
3263  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3264  if (e) {
3265  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3266  if (!o)
3267  return;
3269  }
3270 
3271  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3272  encoder_string = av_mallocz(encoder_string_len);
3273  if (!encoder_string)
3274  exit_program(1);
3275 
3276  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3277  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3278  else
3279  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3280  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3281  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3283 }
3284 
3286  AVCodecContext *avctx)
3287 {
3288  char *p;
3289  int n = 1, i, size, index = 0;
3290  int64_t t, *pts;
3291 
3292  for (p = kf; *p; p++)
3293  if (*p == ',')
3294  n++;
3295  size = n;
3296  pts = av_malloc_array(size, sizeof(*pts));
3297  if (!pts) {
3298  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3299  exit_program(1);
3300  }
3301 
3302  p = kf;
3303  for (i = 0; i < n; i++) {
3304  char *next = strchr(p, ',');
3305 
3306  if (next)
3307  *next++ = 0;
3308 
3309  if (!memcmp(p, "chapters", 8)) {
3310 
3312  int j;
3313 
3314  if (avf->nb_chapters > INT_MAX - size ||
3315  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3316  sizeof(*pts)))) {
3318  "Could not allocate forced key frames array.\n");
3319  exit_program(1);
3320  }
3321  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3322  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3323 
3324  for (j = 0; j < avf->nb_chapters; j++) {
3325  AVChapter *c = avf->chapters[j];
3326  av_assert1(index < size);
3327  pts[index++] = av_rescale_q(c->start, c->time_base,
3328  avctx->time_base) + t;
3329  }
3330 
3331  } else {
3332 
3333  t = parse_time_or_die("force_key_frames", p, 1);
3334  av_assert1(index < size);
3335  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3336 
3337  }
3338 
3339  p = next;
3340  }
3341 
3342  av_assert0(index == size);
3343  qsort(pts, size, sizeof(*pts), compare_int64);
3345  ost->forced_kf_pts = pts;
3346 }
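/*
 * Illustrative sketch, not part of ffmpeg.c: parse_time_or_die() used above is
 * a cmdutils wrapper around av_parse_time(); this is the underlying call for a
 * single "-force_key_frames" timestamp such as "00:01:30.5". The function name
 * is hypothetical.
 */
static int parse_kf_time_example(const char *spec, int64_t *ts)
{
    /* third argument = 1: parse the string as a duration, result in AV_TIME_BASE units */
    return av_parse_time(ts, spec, 1);
}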
3347 
3348 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3349 {
3351  AVCodecContext *enc_ctx = ost->enc_ctx;
3352  AVFormatContext *oc;
3353 
3354  if (ost->enc_timebase.num > 0) {
3355  enc_ctx->time_base = ost->enc_timebase;
3356  return;
3357  }
3358 
3359  if (ost->enc_timebase.num < 0) {
3360  if (ist) {
3361  enc_ctx->time_base = ist->st->time_base;
3362  return;
3363  }
3364 
3365  oc = output_files[ost->file_index]->ctx;
3366  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3367  }
3368 
3369  enc_ctx->time_base = default_time_base;
3370 }
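/*
 * Illustrative sketch, not part of ffmpeg.c: the default_time_base typically
 * passed to init_encoder_time_base() for video is simply the inverse of the
 * chosen output frame rate. The function name is hypothetical.
 */
static AVRational default_video_time_base_example(AVRational frame_rate)
{
    return av_inv_q(frame_rate); /* {num, den} -> {den, num}, e.g. 25/1 -> 1/25 */
}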
3371 
3373 {
3375  AVCodecContext *enc_ctx = ost->enc_ctx;
3378  int j, ret;
3379 
3381 
3382  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3383  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3384  // which have to be filtered out to prevent leaking them to output files.
3385  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3386 
3387  if (ist) {
3388  ost->st->disposition = ist->st->disposition;
3389 
3390  dec_ctx = ist->dec_ctx;
3391 
3393  } else {
3394  for (j = 0; j < oc->nb_streams; j++) {
3395  AVStream *st = oc->streams[j];
3396  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3397  break;
3398  }
3399  if (j == oc->nb_streams)
3403  }
3404 
3405  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3406  if (!ost->frame_rate.num)
3408  if (ist && !ost->frame_rate.num)
3409  ost->frame_rate = ist->framerate;
3410  if (ist && !ost->frame_rate.num)
3411  ost->frame_rate = ist->st->r_frame_rate;
3412  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3413  ost->frame_rate = (AVRational){25, 1};
3415  "No information "
3416  "about the input framerate is available. Falling "
3417  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3418  "if you want a different framerate.\n",
3419  ost->file_index, ost->index);
3420  }
3421 
3422  if (ost->max_frame_rate.num &&
3424  !ost->frame_rate.den))
3426 
3427  if (ost->enc->supported_framerates && !ost->force_fps) {
3428  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3429  ost->frame_rate = ost->enc->supported_framerates[idx];
3430  }
3431  // reduce frame rate for mpeg4 to be within the spec limits
3432  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3434  ost->frame_rate.num, ost->frame_rate.den, 65535);
3435  }
3436  }
3437 
3438  switch (enc_ctx->codec_type) {
3439  case AVMEDIA_TYPE_AUDIO:
3441  if (dec_ctx)
3443  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3447 
3449  break;
3450 
3451  case AVMEDIA_TYPE_VIDEO:
3453 
3454  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3456  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3458  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3459  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3460  }
3461 
3462  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3463  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3465  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3466  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3468 
3470  if (dec_ctx)
3472  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3473 
3474  if (frame) {
3475  enc_ctx->color_range = frame->color_range;
3476  enc_ctx->color_primaries = frame->color_primaries;
3477  enc_ctx->color_trc = frame->color_trc;
3478  enc_ctx->colorspace = frame->colorspace;
3479  enc_ctx->chroma_sample_location = frame->chroma_location;
3480  }
3481 
3482  enc_ctx->framerate = ost->frame_rate;
3483 
3485 
3486  if (!dec_ctx ||
3487  enc_ctx->width != dec_ctx->width ||
3488  enc_ctx->height != dec_ctx->height ||
3489  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3491  }
3492 
3493  // Field order: autodetection
3494  if (frame) {
3496  ost->top_field_first >= 0)
3497  frame->top_field_first = !!ost->top_field_first;
3498 
3499  if (frame->interlaced_frame) {
3500  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3501  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3502  else
3503  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3504  } else
3505  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3506  }
3507 
3508  // Field order: override
3509  if (ost->top_field_first == 0) {
3510  enc_ctx->field_order = AV_FIELD_BB;
3511  } else if (ost->top_field_first == 1) {
3512  enc_ctx->field_order = AV_FIELD_TT;
3513  }
3514 
3515  if (ost->forced_keyframes) {
3516  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3519  if (ret < 0) {
3521  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3522  return ret;
3523  }
3528 
3529  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3530  // parse it only for static kf timings
3531  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3533  }
3534  }
3535  break;
3536  case AVMEDIA_TYPE_SUBTITLE:
3537  enc_ctx->time_base = AV_TIME_BASE_Q;
3538  if (!enc_ctx->width) {
3541  }
3542  break;
3543  case AVMEDIA_TYPE_DATA:
3544  break;
3545  default:
3546  abort();
3547  break;
3548  }
3549 
3550  ost->mux_timebase = enc_ctx->time_base;
3551 
3552  return 0;
3553 }
3554 
3556  char *error, int error_len)
3557 {
3558  int ret = 0;
3559 
3560  if (ost->encoding_needed) {
3561  const AVCodec *codec = ost->enc;
3562  AVCodecContext *dec = NULL;
3563  InputStream *ist;
3564 
3566  if (ret < 0)
3567  return ret;
3568 
3569  if ((ist = get_input_stream(ost)))
3570  dec = ist->dec_ctx;
3571  if (dec && dec->subtitle_header) {
3572  /* ASS code assumes this buffer is null terminated so add extra byte. */
3573  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3574  if (!ost->enc_ctx->subtitle_header)
3575  return AVERROR(ENOMEM);
3576  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3577  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3578  }
3579  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3580  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3581 
3583  if (ret < 0) {
3584  snprintf(error, error_len, "Device setup failed for "
3585  "encoder on output stream #%d:%d : %s",
3587  return ret;
3588  }
3589 
3590  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3591  int input_props = 0, output_props = 0;
3592  AVCodecDescriptor const *input_descriptor =
3593  avcodec_descriptor_get(dec->codec_id);
3594  AVCodecDescriptor const *output_descriptor =
3596  if (input_descriptor)
3597  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3598  if (output_descriptor)
3599  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3600  if (input_props && output_props && input_props != output_props) {
3601  snprintf(error, error_len,
3602  "Subtitle encoding currently only possible from text to text "
3603  "or bitmap to bitmap");
3604  return AVERROR_INVALIDDATA;
3605  }
3606  }
3607 
3608  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3609  if (ret == AVERROR_EXPERIMENTAL)
3610  abort_codec_experimental(codec, 1);
3611  snprintf(error, error_len,
3612  "Error while opening encoder for output stream #%d:%d - "
3613  "maybe incorrect parameters such as bit_rate, rate, width or height",
3614  ost->file_index, ost->index);
3615  return ret;
3616  }
3617  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3618  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3620  ost->enc_ctx->frame_size);
3622  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3623  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3624  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3625  " It takes bits/s as argument, not kbits/s\n");
3626 
3628  if (ret < 0) {
3630  "Error initializing the output stream codec context.\n");
3631  exit_program(1);
3632  }
3633 
3634  if (ost->enc_ctx->nb_coded_side_data) {
3635  int i;
3636 
3637  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3638  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3639  uint8_t *dst_data;
3640 
3641  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3642  if (!dst_data)
3643  return AVERROR(ENOMEM);
3644  memcpy(dst_data, sd_src->data, sd_src->size);
3645  }
3646  }
3647 
3648  /*
3649  * Add global input side data. For now this is naive, and copies it
3650  * from the input stream's global side data. All side data should
3651  * really be funneled over AVFrame and libavfilter, then added back to
3652  * packet side data, and then potentially using the first packet for
3653  * global side data.
3654  */
3655  if (ist) {
3656  int i;
3657  for (i = 0; i < ist->st->nb_side_data; i++) {
3658  AVPacketSideData *sd = &ist->st->side_data[i];
3659  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3660  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3661  if (!dst)
3662  return AVERROR(ENOMEM);
3663  memcpy(dst, sd->data, sd->size);
3664  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3665  av_display_rotation_set((uint32_t *)dst, 0);
3666  }
3667  }
3668  }
3669 
3670  // copy timebase while removing common factors
3671  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3673 
3674  // copy estimated duration as a hint to the muxer
3675  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3676  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3677  } else if (ost->stream_copy) {
3679  if (ret < 0)
3680  return ret;
3681  }
3682 
3683  // parse user provided disposition, and update stream values
3684  if (ost->disposition) {
3685  static const AVOption opts[] = {
3686  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3687  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3688  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3689  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3690  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3691  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3692  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3693  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3694  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3695  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3696  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3697  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3698  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3699  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3700  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3701  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3702  { NULL },
3703  };
3704  static const AVClass class = {
3705  .class_name = "",
3706  .item_name = av_default_item_name,
3707  .option = opts,
3708  .version = LIBAVUTIL_VERSION_INT,
3709  };
3710  const AVClass *pclass = &class;
3711 
3712  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3713  if (ret < 0)
3714  return ret;
3715  }
3716 
3717  /* Initialize bitstream filters for the output stream. This needs to be
3718  * done here, because the codec id for streamcopy is not known until
3719  * now. */
3721  if (ret < 0)
3722  return ret;
3723 
3724  ost->initialized = 1;
3725 
3727  if (ret < 0)
3728  return ret;
3729 
3730  return ret;
3731 }
3732 
3733 static void report_new_stream(int input_index, AVPacket *pkt)
3734 {
3735  InputFile *file = input_files[input_index];
3736  AVStream *st = file->ctx->streams[pkt->stream_index];
3737 
3738  if (pkt->stream_index < file->nb_streams_warn)
3739  return;
3740  av_log(file->ctx, AV_LOG_WARNING,
3741  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3743  input_index, pkt->stream_index,
3744  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3745  file->nb_streams_warn = pkt->stream_index + 1;
3746 }
3747 
3748 static int transcode_init(void)
3749 {
3750  int ret = 0, i, j, k;
3751  AVFormatContext *oc;
3752  OutputStream *ost;
3753  InputStream *ist;
3754  char error[1024] = {0};
3755 
3756  for (i = 0; i < nb_filtergraphs; i++) {
3757  FilterGraph *fg = filtergraphs[i];
3758  for (j = 0; j < fg->nb_outputs; j++) {
3759  OutputFilter *ofilter = fg->outputs[j];
3760  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3761  continue;
3762  if (fg->nb_inputs != 1)
3763  continue;
3764  for (k = nb_input_streams-1; k >= 0 ; k--)
3765  if (fg->inputs[0]->ist == input_streams[k])
3766  break;
3767  ofilter->ost->source_index = k;
3768  }
3769  }
3770 
3771  /* init framerate emulation */
3772  for (i = 0; i < nb_input_files; i++) {
3774  if (ifile->readrate || ifile->rate_emu)
3775  for (j = 0; j < ifile->nb_streams; j++)
3776  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3777  }
3778 
3779  /* init input streams */
3780  for (i = 0; i < nb_input_streams; i++)
3781  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3782  for (i = 0; i < nb_output_streams; i++) {
3783  ost = output_streams[i];
3785  }
3786  goto dump_format;
3787  }
3788 
3789  /*
3790  * initialize stream copy and subtitle/data streams.
3791  * Encoded AVFrame based streams will get initialized as follows:
3792  * - when the first AVFrame is received in do_video_out
3793  * - just before the first AVFrame is received in either transcode_step
3794  * or reap_filters due to us requiring the filter chain buffer sink
3795  * to be configured with the correct audio frame size, which is only
3796  * known after the encoder is initialized.
3797  */
3798  for (i = 0; i < nb_output_streams; i++) {
3799  if (!output_streams[i]->stream_copy &&
3800  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3802  continue;
3803 
3805  if (ret < 0)
3806  goto dump_format;
3807  }
3808 
3809  /* discard unused programs */
3810  for (i = 0; i < nb_input_files; i++) {
3812  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3813  AVProgram *p = ifile->ctx->programs[j];
3814  int discard = AVDISCARD_ALL;
3815 
3816  for (k = 0; k < p->nb_stream_indexes; k++)
3817  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3818  discard = AVDISCARD_DEFAULT;
3819  break;
3820  }
3821  p->discard = discard;
3822  }
3823  }
3824 
3825  /* write headers for files with no streams */
3826  for (i = 0; i < nb_output_files; i++) {
3827  oc = output_files[i]->ctx;
3828  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3830  if (ret < 0)
3831  goto dump_format;
3832  }
3833  }
3834 
3835  dump_format:
3836  /* dump the stream mapping */
3837  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3838  for (i = 0; i < nb_input_streams; i++) {
3839  ist = input_streams[i];
3840 
3841  for (j = 0; j < ist->nb_filters; j++) {
3842  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3843  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3844  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3845  ist->filters[j]->name);
3846  if (nb_filtergraphs > 1)
3847  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3848  av_log(NULL, AV_LOG_INFO, "\n");
3849  }
3850  }
3851  }
3852 
3853  for (i = 0; i < nb_output_streams; i++) {
3854  ost = output_streams[i];
3855 
3856  if (ost->attachment_filename) {
3857  /* an attached file */
3858  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3860  continue;
3861  }
3862 
3864  /* output from a complex graph */
3865  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3866  if (nb_filtergraphs > 1)
3867  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3868 
3869  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3870  ost->index, ost->enc ? ost->enc->name : "?");
3871  continue;
3872  }
3873 
3874  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3877  ost->file_index,
3878  ost->index);
3880  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3882  ost->sync_ist->st->index);
3883  if (ost->stream_copy)
3884  av_log(NULL, AV_LOG_INFO, " (copy)");
3885  else {
3886  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3887  const AVCodec *out_codec = ost->enc;
3888  const char *decoder_name = "?";
3889  const char *in_codec_name = "?";
3890  const char *encoder_name = "?";
3891  const char *out_codec_name = "?";
3892  const AVCodecDescriptor *desc;
3893 
3894  if (in_codec) {
3895  decoder_name = in_codec->name;
3896  desc = avcodec_descriptor_get(in_codec->id);
3897  if (desc)
3898  in_codec_name = desc->name;
3899  if (!strcmp(decoder_name, in_codec_name))
3900  decoder_name = "native";
3901  }
3902 
3903  if (out_codec) {
3904  encoder_name = out_codec->name;
3905  desc = avcodec_descriptor_get(out_codec->id);
3906  if (desc)
3907  out_codec_name = desc->name;
3908  if (!strcmp(encoder_name, out_codec_name))
3909  encoder_name = "native";
3910  }
3911 
3912  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3913  in_codec_name, decoder_name,
3914  out_codec_name, encoder_name);
3915  }
3916  av_log(NULL, AV_LOG_INFO, "\n");
3917  }
3918 
3919  if (ret) {
3920  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3921  return ret;
3922  }
3923 
3925 
3926  return 0;
3927 }
3928 
3929 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3930 static int need_output(void)
3931 {
3932  int i;
3933 
3934  for (i = 0; i < nb_output_streams; i++) {
3938 
3939  if (ost->finished ||
3940  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3941  continue;
3942  if (ost->frame_number >= ost->max_frames) {
3943  int j;
3944  for (j = 0; j < of->ctx->nb_streams; j++)
3946  continue;
3947  }
3948 
3949  return 1;
3950  }
3951 
3952  return 0;
3953 }
3954 
3955 /**
3956  * Select the output stream to process.
3957  *
3958  * @return selected output stream, or NULL if none available
3959  */
3961 {
3962  int i;
3963  int64_t opts_min = INT64_MAX;
3964  OutputStream *ost_min = NULL;
3965 
3966  for (i = 0; i < nb_output_streams; i++) {
3968  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3970  AV_TIME_BASE_Q);
3973  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3975 
3976  if (!ost->initialized && !ost->inputs_done)
3977  return ost->unavailable ? NULL : ost;
3978 
3979  if (!ost->finished && opts < opts_min) {
3980  opts_min = opts;
3981  ost_min = ost->unavailable ? NULL : ost;
3982  }
3983  }
3984  return ost_min;
3985 }
3986 
3987 static void set_tty_echo(int on)
3988 {
3989 #if HAVE_TERMIOS_H
3990  struct termios tty;
3991  if (tcgetattr(0, &tty) == 0) {
3992  if (on) tty.c_lflag |= ECHO;
3993  else tty.c_lflag &= ~ECHO;
3994  tcsetattr(0, TCSANOW, &tty);
3995  }
3996 #endif
3997 }
3998 
3999 static int check_keyboard_interaction(int64_t cur_time)
4000 {
4001  int i, ret, key;
4002  static int64_t last_time;
4003  if (received_nb_signals)
4004  return AVERROR_EXIT;
4005  /* read_key() returns 0 on EOF */
4006  if(cur_time - last_time >= 100000 && !run_as_daemon){
4007  key = read_key();
4008  last_time = cur_time;
4009  }else
4010  key = -1;
4011  if (key == 'q')
4012  return AVERROR_EXIT;
4013  if (key == '+') av_log_set_level(av_log_get_level()+10);
4014  if (key == '-') av_log_set_level(av_log_get_level()-10);
4015  if (key == 's') qp_hist ^= 1;
4016  if (key == 'h'){
4017  if (do_hex_dump){
4018  do_hex_dump = do_pkt_dump = 0;
4019  } else if(do_pkt_dump){
4020  do_hex_dump = 1;
4021  } else
4022  do_pkt_dump = 1;
4024  }
4025  if (key == 'c' || key == 'C'){
4026  char buf[4096], target[64], command[256], arg[256] = {0};
4027  double time;
4028  int k, n = 0;
4029  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4030  i = 0;
4031  set_tty_echo(1);
4032  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4033  if (k > 0)
4034  buf[i++] = k;
4035  buf[i] = 0;
4036  set_tty_echo(0);
4037  fprintf(stderr, "\n");
4038  if (k > 0 &&
4039  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4040  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4041  target, time, command, arg);
4042  for (i = 0; i < nb_filtergraphs; i++) {
4043  FilterGraph *fg = filtergraphs[i];
4044  if (fg->graph) {
4045  if (time < 0) {
4046  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4047  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4048  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4049  } else if (key == 'c') {
4050  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4052  } else {
4053  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4054  if (ret < 0)
4055  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4056  }
4057  }
4058  }
4059  } else {
4061  "Parse error, at least 3 arguments were expected, "
4062  "only %d given in string '%s'\n", n, buf);
4063  }
4064  }
4065  if (key == 'd' || key == 'D'){
4066  int debug=0;
4067  if(key == 'D') {
4068  debug = input_streams[0]->dec_ctx->debug << 1;
4069  if(!debug) debug = 1;
4070  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4071  debug += debug;
4072  }else{
4073  char buf[32];
4074  int k = 0;