ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int current_time;
133 AVIOContext *progress_avio = NULL;
134 
135 static uint8_t *subtitle_out;
136 
137 InputStream **input_streams = NULL;
138 int nb_input_streams = 0;
139 InputFile **input_files = NULL;
140 int nb_input_files = 0;
141 
142 OutputStream **output_streams = NULL;
143 int nb_output_streams = 0;
144 OutputFile **output_files = NULL;
145 int nb_output_files = 0;
146 
147 FilterGraph **filtergraphs;
148 int nb_filtergraphs;
149 
150 #if HAVE_TERMIOS_H
151 
152 /* init terminal so that we can grab keys */
153 static struct termios oldtty;
154 static int restore_tty;
155 #endif
156 
157 #if HAVE_PTHREADS
158 static void free_input_threads(void);
159 #endif
160 
161 /* sub2video hack:
162    Convert subtitles to video frames with alpha, so they can be inserted into filter graphs.
163    This is a temporary solution until libavfilter gets real subtitle support.
164  */
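/* Overview of the helpers below: sub2video_update() renders each
 * AVSubtitleRect into an RGB32 AVFrame (sub2video_copy_rect() does the
 * palette lookup, writing one uint32_t per pixel), sub2video_push_ref()
 * pushes that frame into every buffersrc fed by this stream, and
 * sub2video_heartbeat() re-sends the latest frame so filters such as overlay
 * are not starved between subtitle packets. */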
165 
166 static int sub2video_get_blank_frame(InputStream *ist)
167 {
168  int ret;
169  AVFrame *frame = ist->sub2video.frame;
170 
171  av_frame_unref(frame);
172  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
173  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
174  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
175  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
176  return ret;
177  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
178  return 0;
179 }
180 
181 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
182  AVSubtitleRect *r)
183 {
184  uint32_t *pal, *dst2;
185  uint8_t *src, *src2;
186  int x, y;
187 
188  if (r->type != SUBTITLE_BITMAP) {
189  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
190  return;
191  }
192  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
193  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
194  r->x, r->y, r->w, r->h, w, h
195  );
196  return;
197  }
198 
199  dst += r->y * dst_linesize + r->x * 4;
200  src = r->data[0];
201  pal = (uint32_t *)r->data[1];
202  for (y = 0; y < r->h; y++) {
203  dst2 = (uint32_t *)dst;
204  src2 = src;
205  for (x = 0; x < r->w; x++)
206  *(dst2++) = pal[*(src2++)];
207  dst += dst_linesize;
208  src += r->linesize[0];
209  }
210 }
211 
212 static void sub2video_push_ref(InputStream *ist, int64_t pts)
213 {
214  AVFrame *frame = ist->sub2video.frame;
215  int i;
216 
217  av_assert1(frame->data[0]);
218  ist->sub2video.last_pts = frame->pts = pts;
219  for (i = 0; i < ist->nb_filters; i++)
220  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
221  AV_BUFFERSRC_FLAG_KEEP_REF |
222  AV_BUFFERSRC_FLAG_PUSH);
223 }
224 
225 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
226 {
227  AVFrame *frame = ist->sub2video.frame;
228  uint8_t *dst;
229  int dst_linesize;
230  int num_rects, i;
231  int64_t pts, end_pts;
232 
233  if (!frame)
234  return;
235  if (sub) {
236  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  num_rects = sub->num_rects;
241  } else {
242  pts = ist->sub2video.end_pts;
243  end_pts = INT64_MAX;
244  num_rects = 0;
245  }
246  if (sub2video_get_blank_frame(ist) < 0) {
248  "Impossible to get a blank canvas.\n");
249  return;
250  }
251  dst = frame->data [0];
252  dst_linesize = frame->linesize[0];
253  for (i = 0; i < num_rects; i++)
254  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
255  sub2video_push_ref(ist, pts);
256  ist->sub2video.end_pts = end_pts;
257 }
258 
259 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
260 {
261  InputFile *infile = input_files[ist->file_index];
262  int i, j, nb_reqs;
263  int64_t pts2;
264 
265  /* When a frame is read from a file, examine all sub2video streams in
266  the same file and send the sub2video frame again. Otherwise, decoded
267  video frames could be accumulating in the filter graph while a filter
268  (possibly overlay) is desperately waiting for a subtitle frame. */
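/* In other words: whenever a packet is read from this input file, every
 * sub2video stream of the same file re-pushes its most recent (possibly
 * blank) frame to any buffersrc that reports failed frame requests, so the
 * graph can keep consuming video even when no new subtitle has arrived. */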
269  for (i = 0; i < infile->nb_streams; i++) {
270  InputStream *ist2 = input_streams[infile->ist_index + i];
271  if (!ist2->sub2video.frame)
272  continue;
273  /* subtitles seem to be usually muxed ahead of other streams;
274  if not, subtracting a larger time here is necessary */
275  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
276  /* do not send the heartbeat frame if the subtitle is already ahead */
277  if (pts2 <= ist2->sub2video.last_pts)
278  continue;
279  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
280  sub2video_update(ist2, NULL);
281  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
282  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
283  if (nb_reqs)
284  sub2video_push_ref(ist2, pts2);
285  }
286 }
287 
288 static void sub2video_flush(InputStream *ist)
289 {
290  int i;
291 
292  if (ist->sub2video.end_pts < INT64_MAX)
293  sub2video_update(ist, NULL);
294  for (i = 0; i < ist->nb_filters; i++)
295  av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
296 }
297 
298 /* end of sub2video hack */
299 
300 static void term_exit_sigsafe(void)
301 {
302 #if HAVE_TERMIOS_H
303  if(restore_tty)
304  tcsetattr (0, TCSANOW, &oldtty);
305 #endif
306 }
307 
308 void term_exit(void)
309 {
310  av_log(NULL, AV_LOG_QUIET, "%s", "");
311  term_exit_sigsafe();
312 }
313 
314 static volatile int received_sigterm = 0;
315 static volatile int received_nb_signals = 0;
316 static volatile int transcode_init_done = 0;
317 static volatile int ffmpeg_exited = 0;
318 static int main_return_code = 0;
319 
320 static void
321 sigterm_handler(int sig)
322 {
323  received_sigterm = sig;
324  received_nb_signals++;
325  term_exit_sigsafe();
326  if(received_nb_signals > 3) {
327  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
328  strlen("Received > 3 system signals, hard exiting\n"));
329 
330  exit(123);
331  }
332 }
333 
334 #if HAVE_SETCONSOLECTRLHANDLER
335 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
336 {
337  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
338 
339  switch (fdwCtrlType)
340  {
341  case CTRL_C_EVENT:
342  case CTRL_BREAK_EVENT:
343  sigterm_handler(SIGINT);
344  return TRUE;
345 
346  case CTRL_CLOSE_EVENT:
347  case CTRL_LOGOFF_EVENT:
348  case CTRL_SHUTDOWN_EVENT:
349  sigterm_handler(SIGTERM);
350  /* Basically, with these 3 events, when we return from this handler the
351  process is hard terminated, so stall as long as we need to
352  in order to let the main thread(s) clean up and gracefully terminate
353  (we have at most 5 seconds, but should be done far before that). */
354  while (!ffmpeg_exited) {
355  Sleep(0);
356  }
357  return TRUE;
358 
359  default:
360  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
361  return FALSE;
362  }
363 }
364 #endif
365 
366 void term_init(void)
367 {
368 #if HAVE_TERMIOS_H
369  if(!run_as_daemon){
370  struct termios tty;
371  if (tcgetattr (0, &tty) == 0) {
372  oldtty = tty;
373  restore_tty = 1;
374 
375  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
376  |INLCR|IGNCR|ICRNL|IXON);
377  tty.c_oflag |= OPOST;
378  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
379  tty.c_cflag &= ~(CSIZE|PARENB);
380  tty.c_cflag |= CS8;
381  tty.c_cc[VMIN] = 1;
382  tty.c_cc[VTIME] = 0;
383 
384  tcsetattr (0, TCSANOW, &tty);
385  }
386  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
387  }
388 #endif
389 
390  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
391  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
392 #ifdef SIGXCPU
393  signal(SIGXCPU, sigterm_handler);
394 #endif
395 #if HAVE_SETCONSOLECTRLHANDLER
396  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
397 #endif
398 }
399 
400 /* read a key without blocking */
401 static int read_key(void)
402 {
403  unsigned char ch;
404 #if HAVE_TERMIOS_H
405  int n = 1;
406  struct timeval tv;
407  fd_set rfds;
408 
409  FD_ZERO(&rfds);
410  FD_SET(0, &rfds);
411  tv.tv_sec = 0;
412  tv.tv_usec = 0;
413  n = select(1, &rfds, NULL, NULL, &tv);
414  if (n > 0) {
415  n = read(0, &ch, 1);
416  if (n == 1)
417  return ch;
418 
419  return n;
420  }
421 #elif HAVE_KBHIT
422 # if HAVE_PEEKNAMEDPIPE
423  static int is_pipe;
424  static HANDLE input_handle;
425  DWORD dw, nchars;
426  if(!input_handle){
427  input_handle = GetStdHandle(STD_INPUT_HANDLE);
428  is_pipe = !GetConsoleMode(input_handle, &dw);
429  }
430 
431  if (is_pipe) {
432  /* When running under a GUI, you will end up here. */
433  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
434  // input pipe may have been closed by the program that ran ffmpeg
435  return -1;
436  }
437  //Read it
438  if(nchars != 0) {
439  read(0, &ch, 1);
440  return ch;
441  }else{
442  return -1;
443  }
444  }
445 # endif
446  if(kbhit())
447  return(getch());
448 #endif
449  return -1;
450 }
451 
452 static int decode_interrupt_cb(void *ctx)
453 {
454  return received_nb_signals > transcode_init_done;
455 }
456 
457 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
458 
459 static void ffmpeg_cleanup(int ret)
460 {
461  int i, j;
462 
463  if (do_benchmark) {
464  int maxrss = getmaxrss() / 1024;
465  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
466  }
467 
468  for (i = 0; i < nb_filtergraphs; i++) {
469  FilterGraph *fg = filtergraphs[i];
470  avfilter_graph_free(&fg->graph);
471  for (j = 0; j < fg->nb_inputs; j++) {
472  av_freep(&fg->inputs[j]->name);
473  av_freep(&fg->inputs[j]);
474  }
475  av_freep(&fg->inputs);
476  for (j = 0; j < fg->nb_outputs; j++) {
477  av_freep(&fg->outputs[j]->name);
478  av_freep(&fg->outputs[j]);
479  }
480  av_freep(&fg->outputs);
481  av_freep(&fg->graph_desc);
482 
483  av_freep(&filtergraphs[i]);
484  }
485  av_freep(&filtergraphs);
486 
488 
489  /* close files */
490  for (i = 0; i < nb_output_files; i++) {
491  OutputFile *of = output_files[i];
492  AVFormatContext *s;
493  if (!of)
494  continue;
495  s = of->ctx;
496  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
497  avio_closep(&s->pb);
499  av_dict_free(&of->opts);
500 
501  av_freep(&output_files[i]);
502  }
503  for (i = 0; i < nb_output_streams; i++) {
504  OutputStream *ost = output_streams[i];
505  AVBitStreamFilterContext *bsfc;
506 
507  if (!ost)
508  continue;
509 
510  bsfc = ost->bitstream_filters;
511  while (bsfc) {
512  AVBitStreamFilterContext *next = bsfc->next;
514  bsfc = next;
515  }
516  ost->bitstream_filters = NULL;
518  av_frame_free(&ost->last_frame);
519 
520  av_parser_close(ost->parser);
521 
522  av_freep(&ost->forced_keyframes);
524  av_freep(&ost->avfilter);
525  av_freep(&ost->logfile_prefix);
526 
528  ost->audio_channels_mapped = 0;
529 
530  av_dict_free(&ost->sws_dict);
531 
533 
534  av_freep(&output_streams[i]);
535  }
536 #if HAVE_PTHREADS
537  free_input_threads();
538 #endif
539  for (i = 0; i < nb_input_files; i++) {
540  avformat_close_input(&input_files[i]->ctx);
541  av_freep(&input_files[i]);
542  }
543  for (i = 0; i < nb_input_streams; i++) {
544  InputStream *ist = input_streams[i];
545 
548  av_dict_free(&ist->decoder_opts);
551  av_freep(&ist->filters);
552  av_freep(&ist->hwaccel_device);
553 
555 
556  av_freep(&input_streams[i]);
557  }
558 
559  if (vstats_file) {
560  if (fclose(vstats_file))
562  "Error closing vstats file, loss of information possible: %s\n",
563  av_err2str(AVERROR(errno)));
564  }
566 
567  av_freep(&input_streams);
568  av_freep(&input_files);
569  av_freep(&output_streams);
570  av_freep(&output_files);
571 
572  uninit_opts();
573 
575 
576  if (received_sigterm) {
577  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
578  (int) received_sigterm);
579  } else if (ret && transcode_init_done) {
580  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
581  }
582  term_exit();
583  ffmpeg_exited = 1;
584 }
585 
586 void remove_avoptions(AVDictionary **a, AVDictionary *b)
587 {
588  AVDictionaryEntry *t = NULL;
589 
590  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
591  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
592  }
593 }
594 
595 void assert_avoptions(AVDictionary *m)
596 {
597  AVDictionaryEntry *t;
598  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
599  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
600  exit_program(1);
601  }
602 }
603 
604 static void abort_codec_experimental(AVCodec *c, int encoder)
605 {
606  exit_program(1);
607 }
608 
609 static void update_benchmark(const char *fmt, ...)
610 {
611  if (do_benchmark_all) {
612  int64_t t = getutime();
613  va_list va;
614  char buf[1024];
615 
616  if (fmt) {
617  va_start(va, fmt);
618  vsnprintf(buf, sizeof(buf), fmt, va);
619  va_end(va);
620  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
621  }
622  current_time = t;
623  }
624 }
625 
626 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
627 {
628  int i;
629  for (i = 0; i < nb_output_streams; i++) {
630  OutputStream *ost2 = output_streams[i];
631  ost2->finished |= ost == ost2 ? this_stream : others;
632  }
633 }
634 
635 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
636 {
637  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
638  AVCodecContext *avctx = ost->encoding_needed ? ost->enc_ctx : ost->st->codec;
639  int ret;
640 
641  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
642  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
643  if (ost->st->codec->extradata) {
644  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
645  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
646  }
647  }
648 
649  if ((avctx->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
650  (avctx->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
651  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
652 
653  /*
654  * Audio encoders may split the packets -- #frames in != #packets out.
655  * But there is no reordering, so we can limit the number of output packets
656  * by simply dropping them here.
657  * Counting encoded video frames needs to be done separately because of
658  * reordering, see do_video_out()
659  */
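/* Illustrative consequence: with -frames:a 100, ost->max_frames is 100 and
 * the check below stops the stream after 100 encoded audio packets, however
 * many raw frames were fed to the encoder. */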
660  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
661  if (ost->frame_number >= ost->max_frames) {
662  av_packet_unref(pkt);
663  return;
664  }
665  ost->frame_number++;
666  }
667  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668  int i;
669  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
670  NULL);
671  ost->quality = sd ? AV_RL32(sd) : -1;
672  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
673 
674  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
675  if (sd && i < sd[5])
676  ost->error[i] = AV_RL64(sd + 8 + 8*i);
677  else
678  ost->error[i] = -1;
679  }
680 
681  if (ost->frame_rate.num && ost->is_cfr) {
682  if (pkt->duration > 0)
683  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
684  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
685  ost->st->time_base);
686  }
687  }
688 
689  if (bsfc)
690  av_packet_split_side_data(pkt);
691 
692  if ((ret = av_apply_bitstream_filters(avctx, pkt, bsfc)) < 0) {
693  print_error("", ret);
694  if (exit_on_error)
695  exit_program(1);
696  }
697  if (pkt->size == 0 && pkt->side_data_elems == 0)
698  return;
699  if (!ost->st->codecpar->extradata && avctx->extradata) {
700  ost->st->codecpar->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
701  if (!ost->st->codecpar->extradata) {
702  av_log(NULL, AV_LOG_ERROR, "Could not allocate extradata buffer to copy parser data.\n");
703  exit_program(1);
704  }
705  ost->st->codecpar->extradata_size = avctx->extradata_size;
706  memcpy(ost->st->codecpar->extradata, avctx->extradata, avctx->extradata_size);
707  }
708 
709  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
710  if (pkt->dts != AV_NOPTS_VALUE &&
711  pkt->pts != AV_NOPTS_VALUE &&
712  pkt->dts > pkt->pts) {
713  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
714  pkt->dts, pkt->pts,
715  ost->file_index, ost->st->index);
716  pkt->pts =
717  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
718  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
719  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
720  }
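/* Worked example of the guess above: with pkt->pts = 100, pkt->dts = 120 and
 * ost->last_mux_dts = 90, the sum is 100 + 120 + 91 = 311; subtracting the
 * minimum (91) and the maximum (120) leaves 100, i.e. both pts and dts become
 * the median of {pts, dts, last_mux_dts + 1}, restoring dts <= pts. */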
721  if(
722  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
723  pkt->dts != AV_NOPTS_VALUE &&
724  !(avctx->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
725  ost->last_mux_dts != AV_NOPTS_VALUE) {
726  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
727  if (pkt->dts < max) {
728  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
729  av_log(s, loglevel, "Non-monotonous DTS in output stream "
730  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
731  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
732  if (exit_on_error) {
733  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
734  exit_program(1);
735  }
736  av_log(s, loglevel, "changing to %"PRId64". This may result "
737  "in incorrect timestamps in the output file.\n",
738  max);
739  if(pkt->pts >= pkt->dts)
740  pkt->pts = FFMAX(pkt->pts, max);
741  pkt->dts = max;
742  }
743  }
744  }
745  ost->last_mux_dts = pkt->dts;
746 
747  ost->data_size += pkt->size;
748  ost->packets_written++;
749 
750  pkt->stream_index = ost->index;
751 
752  if (debug_ts) {
753  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
754  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
756  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
757  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
758  pkt->size
759  );
760  }
761 
762  ret = av_interleaved_write_frame(s, pkt);
763  if (ret < 0) {
764  print_error("av_interleaved_write_frame()", ret);
765  main_return_code = 1;
766  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
767  }
768  av_packet_unref(pkt);
769 }
770 
771 static void close_output_stream(OutputStream *ost)
772 {
773  OutputFile *of = output_files[ost->file_index];
774 
775  ost->finished |= ENCODER_FINISHED;
776  if (of->shortest) {
777  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
778  of->recording_time = FFMIN(of->recording_time, end);
779  }
780 }
781 
782 static int check_recording_time(OutputStream *ost)
783 {
784  OutputFile *of = output_files[ost->file_index];
785 
786  if (of->recording_time != INT64_MAX &&
787  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
788  AV_TIME_BASE_Q) >= 0) {
789  close_output_stream(ost);
790  return 0;
791  }
792  return 1;
793 }
794 
795 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
796  AVFrame *frame)
797 {
798  AVCodecContext *enc = ost->enc_ctx;
799  AVPacket pkt;
800  int got_packet = 0;
801 
802  av_init_packet(&pkt);
803  pkt.data = NULL;
804  pkt.size = 0;
805 
806  if (!check_recording_time(ost))
807  return;
808 
809  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
810  frame->pts = ost->sync_opts;
811  ost->sync_opts = frame->pts + frame->nb_samples;
812  ost->samples_encoded += frame->nb_samples;
813  ost->frames_encoded++;
814 
815  av_assert0(pkt.size || !pkt.data);
817  if (debug_ts) {
818  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
819  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
820  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
821  enc->time_base.num, enc->time_base.den);
822  }
823 
824  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
825  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
826  exit_program(1);
827  }
828  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
829 
830  if (got_packet) {
831  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
832 
833  if (debug_ts) {
834  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
835  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
836  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
837  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
838  }
839 
840  write_frame(s, &pkt, ost);
841  }
842 }
843 
844 static void do_subtitle_out(AVFormatContext *s,
845  OutputStream *ost,
846  InputStream *ist,
847  AVSubtitle *sub)
848 {
849  int subtitle_out_max_size = 1024 * 1024;
850  int subtitle_out_size, nb, i;
851  AVCodecContext *enc;
852  AVPacket pkt;
853  int64_t pts;
854 
855  if (sub->pts == AV_NOPTS_VALUE) {
856  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
857  if (exit_on_error)
858  exit_program(1);
859  return;
860  }
861 
862  enc = ost->enc_ctx;
863 
864  if (!subtitle_out) {
865  subtitle_out = av_malloc(subtitle_out_max_size);
866  if (!subtitle_out) {
867  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
868  exit_program(1);
869  }
870  }
871 
872  /* Note: DVB subtitles need one packet to draw them and another
873  packet to clear them */
874  /* XXX: signal it in the codec context ? */
875  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
876  nb = 2;
877  else
878  nb = 1;
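/* For DVB subtitles the loop below therefore runs twice: pass i == 0 encodes
 * the rectangles at the start display time, and pass i == 1 encodes an empty
 * subtitle (num_rects forced to 0) with its pts shifted by end_display_time,
 * which is what actually clears the subtitle from the screen. */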
879 
880  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
881  pts = sub->pts;
882  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
883  pts -= output_files[ost->file_index]->start_time;
884  for (i = 0; i < nb; i++) {
885  unsigned save_num_rects = sub->num_rects;
886 
887  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
888  if (!check_recording_time(ost))
889  return;
890 
891  sub->pts = pts;
892  // start_display_time is required to be 0
893  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
894  sub->end_display_time -= sub->start_display_time;
895  sub->start_display_time = 0;
896  if (i == 1)
897  sub->num_rects = 0;
898 
899  ost->frames_encoded++;
900 
901  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
902  subtitle_out_max_size, sub);
903  if (i == 1)
904  sub->num_rects = save_num_rects;
905  if (subtitle_out_size < 0) {
906  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
907  exit_program(1);
908  }
909 
910  av_init_packet(&pkt);
911  pkt.data = subtitle_out;
912  pkt.size = subtitle_out_size;
913  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
914  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
915  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
916  /* XXX: the pts correction is handled here. Maybe handling
917  it in the codec would be better */
918  if (i == 0)
919  pkt.pts += 90 * sub->start_display_time;
920  else
921  pkt.pts += 90 * sub->end_display_time;
922  }
923  pkt.dts = pkt.pts;
924  write_frame(s, &pkt, ost);
925  }
926 }
927 
928 static void do_video_out(AVFormatContext *s,
929  OutputStream *ost,
930  AVFrame *next_picture,
931  double sync_ipts)
932 {
933  int ret, format_video_sync;
934  AVPacket pkt;
935  AVCodecContext *enc = ost->enc_ctx;
936  AVCodecContext *mux_enc = ost->st->codec;
937  int nb_frames, nb0_frames, i;
938  double delta, delta0;
939  double duration = 0;
940  int frame_size = 0;
941  InputStream *ist = NULL;
942  AVFilterContext *filter = ost->filter->filter;
943 
944  if (ost->source_index >= 0)
945  ist = input_streams[ost->source_index];
946 
947  if (filter->inputs[0]->frame_rate.num > 0 &&
948  filter->inputs[0]->frame_rate.den > 0)
949  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
950 
951  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
952  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
953 
954  if (!ost->filters_script &&
955  !ost->filters &&
956  next_picture &&
957  ist &&
958  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
959  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
960  }
961 
962  if (!next_picture) {
963  //end, flushing
964  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
965  ost->last_nb0_frames[1],
966  ost->last_nb0_frames[2]);
967  } else {
968  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
969  delta = delta0 + duration;
970 
971  /* by default, we output a single frame */
972  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
973  nb_frames = 1;
974 
975  format_video_sync = video_sync_method;
976  if (format_video_sync == VSYNC_AUTO) {
977  if(!strcmp(s->oformat->name, "avi")) {
978  format_video_sync = VSYNC_VFR;
979  } else
980  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
981  if ( ist
982  && format_video_sync == VSYNC_CFR
983  && input_files[ist->file_index]->ctx->nb_streams == 1
984  && input_files[ist->file_index]->input_ts_offset == 0) {
985  format_video_sync = VSYNC_VSCFR;
986  }
987  if (format_video_sync == VSYNC_CFR && copy_ts) {
988  format_video_sync = VSYNC_VSCFR;
989  }
990  }
991  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
992 
993  if (delta0 < 0 &&
994  delta > 0 &&
995  format_video_sync != VSYNC_PASSTHROUGH &&
996  format_video_sync != VSYNC_DROP) {
997  if (delta0 < -0.6) {
998  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
999  } else
1000  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1001  sync_ipts = ost->sync_opts;
1002  duration += delta0;
1003  delta0 = 0;
1004  }
1005 
1006  switch (format_video_sync) {
1007  case VSYNC_VSCFR:
1008  if (ost->frame_number == 0 && delta0 >= 0.5) {
1009  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1010  delta = duration;
1011  delta0 = 0;
1012  ost->sync_opts = lrint(sync_ipts);
1013  }
1014  case VSYNC_CFR:
1015  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1016  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1017  nb_frames = 0;
1018  } else if (delta < -1.1)
1019  nb_frames = 0;
1020  else if (delta > 1.1) {
1021  nb_frames = lrintf(delta);
1022  if (delta0 > 1.1)
1023  nb0_frames = lrintf(delta0 - 0.6);
1024  }
1025  break;
1026  case VSYNC_VFR:
1027  if (delta <= -0.6)
1028  nb_frames = 0;
1029  else if (delta > 0.6)
1030  ost->sync_opts = lrint(sync_ipts);
1031  break;
1032  case VSYNC_DROP:
1033  case VSYNC_PASSTHROUGH:
1034  ost->sync_opts = lrint(sync_ipts);
1035  break;
1036  default:
1037  av_assert0(0);
1038  }
1039  }
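/* Illustrative CFR case: if the new frame lands 2.6 output frame intervals
 * after the last emitted one (delta == 2.6), lrintf(delta) == 3 and the frame
 * is written three times (two duplicates, counted in nb_frames_dup); if it
 * lands more than 1.1 intervals early (delta < -1.1) it is dropped. */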
1040 
1041  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1042  nb0_frames = FFMIN(nb0_frames, nb_frames);
1043 
1044  memmove(ost->last_nb0_frames + 1,
1045  ost->last_nb0_frames,
1046  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1047  ost->last_nb0_frames[0] = nb0_frames;
1048 
1049  if (nb0_frames == 0 && ost->last_dropped) {
1050  nb_frames_drop++;
1052  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1053  ost->frame_number, ost->st->index, ost->last_frame->pts);
1054  }
1055  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1056  if (nb_frames > dts_error_threshold * 30) {
1057  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1058  nb_frames_drop++;
1059  return;
1060  }
1061  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1062  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1063  }
1064  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1065 
1066  /* duplicates frame if needed */
1067  for (i = 0; i < nb_frames; i++) {
1068  AVFrame *in_picture;
1069  av_init_packet(&pkt);
1070  pkt.data = NULL;
1071  pkt.size = 0;
1072 
1073  if (i < nb0_frames && ost->last_frame) {
1074  in_picture = ost->last_frame;
1075  } else
1076  in_picture = next_picture;
1077 
1078  if (!in_picture)
1079  return;
1080 
1081  in_picture->pts = ost->sync_opts;
1082 
1083 #if 1
1084  if (!check_recording_time(ost))
1085 #else
1086  if (ost->frame_number >= ost->max_frames)
1087 #endif
1088  return;
1089 
1090 #if FF_API_LAVF_FMT_RAWPICTURE
1091  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1092  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1093  /* raw pictures are written as an AVPicture structure to
1094  avoid any copies. We temporarily support the older
1095  method. */
1096  if (in_picture->interlaced_frame)
1097  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1098  else
1099  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1100  pkt.data = (uint8_t *)in_picture;
1101  pkt.size = sizeof(AVPicture);
1102  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1103  pkt.flags |= AV_PKT_FLAG_KEY;
1104 
1105  write_frame(s, &pkt, ost);
1106  } else
1107 #endif
1108  {
1109  int got_packet, forced_keyframe = 0;
1110  double pts_time;
1111 
1113  ost->top_field_first >= 0)
1114  in_picture->top_field_first = !!ost->top_field_first;
1115 
1116  if (in_picture->interlaced_frame) {
1117  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1118  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1119  else
1120  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1121  } else
1122  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1123 
1124  in_picture->quality = enc->global_quality;
1125  in_picture->pict_type = 0;
1126 
1127  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1128  in_picture->pts * av_q2d(enc->time_base) : NAN;
1129  if (ost->forced_kf_index < ost->forced_kf_count &&
1130  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1131  ost->forced_kf_index++;
1132  forced_keyframe = 1;
1133  } else if (ost->forced_keyframes_pexpr) {
1134  double res;
1135  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1138  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1144  res);
1145  if (res) {
1146  forced_keyframe = 1;
1152  }
1153 
1155  } else if ( ost->forced_keyframes
1156  && !strncmp(ost->forced_keyframes, "source", 6)
1157  && in_picture->key_frame==1) {
1158  forced_keyframe = 1;
1159  }
1160 
1161  if (forced_keyframe) {
1162  in_picture->pict_type = AV_PICTURE_TYPE_I;
1163  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1164  }
1165 
1167  if (debug_ts) {
1168  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1169  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1170  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1171  enc->time_base.num, enc->time_base.den);
1172  }
1173 
1174  ost->frames_encoded++;
1175 
1176  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1177  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1178  if (ret < 0) {
1179  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1180  exit_program(1);
1181  }
1182 
1183  if (got_packet) {
1184  if (debug_ts) {
1185  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1186  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1187  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1188  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1189  }
1190 
1191  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1192  pkt.pts = ost->sync_opts;
1193 
1194  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1195 
1196  if (debug_ts) {
1197  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1198  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1199  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1200  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1201  }
1202 
1203  frame_size = pkt.size;
1204  write_frame(s, &pkt, ost);
1205 
1206  /* if two pass, output log */
1207  if (ost->logfile && enc->stats_out) {
1208  fprintf(ost->logfile, "%s", enc->stats_out);
1209  }
1210  }
1211  }
1212  ost->sync_opts++;
1213  /*
1214  * For video, number of frames in == number of packets out.
1215  * But there may be reordering, so we can't throw away frames on encoder
1216  * flush; we need to limit them here, before they go into the encoder.
1217  */
1218  ost->frame_number++;
1219 
1220  if (vstats_filename && frame_size)
1221  do_video_stats(ost, frame_size);
1222  }
1223 
1224  if (!ost->last_frame)
1225  ost->last_frame = av_frame_alloc();
1226  av_frame_unref(ost->last_frame);
1227  if (next_picture && ost->last_frame)
1228  av_frame_ref(ost->last_frame, next_picture);
1229  else
1230  av_frame_free(&ost->last_frame);
1231 }
1232 
1233 static double psnr(double d)
1234 {
1235  return -10.0 * log10(d);
1236 }
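/* The callers pass psnr() a mean squared error already normalized by
 * width * height * 255^2, so the result is the usual
 * PSNR = 10 * log10(MAX^2 / MSE), in dB. */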
1237 
1238 static void do_video_stats(OutputStream *ost, int frame_size)
1239 {
1240  AVCodecContext *enc;
1241  int frame_number;
1242  double ti1, bitrate, avg_bitrate;
1243 
1244  /* this is executed just the first time do_video_stats is called */
1245  if (!vstats_file) {
1246  vstats_file = fopen(vstats_filename, "w");
1247  if (!vstats_file) {
1248  perror("fopen");
1249  exit_program(1);
1250  }
1251  }
1252 
1253  enc = ost->enc_ctx;
1254  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1255  frame_number = ost->st->nb_frames;
1256  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1257  ost->quality / (float)FF_QP2LAMBDA);
1258 
1259  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1260  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1261 
1262  fprintf(vstats_file,"f_size= %6d ", frame_size);
1263  /* compute pts value */
1264  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1265  if (ti1 < 0.01)
1266  ti1 = 0.01;
1267 
1268  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1269  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1270  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1271  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1272  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1273  }
1274 }
1275 
1276 static void finish_output_stream(OutputStream *ost)
1277 {
1278  OutputFile *of = output_files[ost->file_index];
1279  int i;
1280 
1281  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1282 
1283  if (of->shortest) {
1284  for (i = 0; i < of->ctx->nb_streams; i++)
1285  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1286  }
1287 }
1288 
1289 /**
1290  * Get and encode new output from any of the filtergraphs, without causing
1291  * activity.
1292  *
1293  * @return 0 for success, <0 for severe errors
1294  */
1295 static int reap_filters(int flush)
1296 {
1297  AVFrame *filtered_frame = NULL;
1298  int i;
1299 
1300  /* Reap all buffers present in the buffer sinks */
1301  for (i = 0; i < nb_output_streams; i++) {
1302  OutputStream *ost = output_streams[i];
1303  OutputFile *of = output_files[ost->file_index];
1304  AVFilterContext *filter;
1305  AVCodecContext *enc = ost->enc_ctx;
1306  int ret = 0;
1307 
1308  if (!ost->filter)
1309  continue;
1310  filter = ost->filter->filter;
1311 
1312  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1313  return AVERROR(ENOMEM);
1314  }
1315  filtered_frame = ost->filtered_frame;
1316 
1317  while (1) {
1318  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1319  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1320  AV_BUFFERSINK_FLAG_NO_REQUEST);
1321  if (ret < 0) {
1322  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1324  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1325  } else if (flush && ret == AVERROR_EOF) {
1326  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1327  do_video_out(of->ctx, ost, NULL, AV_NOPTS_VALUE);
1328  }
1329  break;
1330  }
1331  if (ost->finished) {
1332  av_frame_unref(filtered_frame);
1333  continue;
1334  }
1335  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1336  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1337  AVRational tb = enc->time_base;
1338  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1339 
1340  tb.den <<= extra_bits;
1341  float_pts =
1342  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1343  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1344  float_pts /= 1 << extra_bits;
1345  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1346  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1347 
1348  filtered_frame->pts =
1349  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1350  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1351  }
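/* Illustrative numbers: for an encoder time base of 1/25, av_log2(25) == 4,
 * so extra_bits == av_clip(29 - 4, 0, 16) == 16 and the rescale above runs in
 * a 1/(25 << 16) time base; dividing by 2^16 afterwards yields a pts with a
 * fractional part, which is later passed to do_video_out() as sync_ipts. */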
1352  //if (ost->source_index >= 0)
1353  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1354 
1355  switch (filter->inputs[0]->type) {
1356  case AVMEDIA_TYPE_VIDEO:
1357  if (!ost->frame_aspect_ratio.num)
1358  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1359 
1360  if (debug_ts) {
1361  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1362  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1363  float_pts,
1364  enc->time_base.num, enc->time_base.den);
1365  }
1366 
1367  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1368  break;
1369  case AVMEDIA_TYPE_AUDIO:
1370  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1371  enc->channels != av_frame_get_channels(filtered_frame)) {
1373  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1374  break;
1375  }
1376  do_audio_out(of->ctx, ost, filtered_frame);
1377  break;
1378  default:
1379  // TODO support subtitle filters
1380  av_assert0(0);
1381  }
1382 
1383  av_frame_unref(filtered_frame);
1384  }
1385  }
1386 
1387  return 0;
1388 }
1389 
1390 static void print_final_stats(int64_t total_size)
1391 {
1392  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1393  uint64_t subtitle_size = 0;
1394  uint64_t data_size = 0;
1395  float percent = -1.0;
1396  int i, j;
1397  int pass1_used = 1;
1398 
1399  for (i = 0; i < nb_output_streams; i++) {
1400  OutputStream *ost = output_streams[i];
1401  switch (ost->enc_ctx->codec_type) {
1402  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1403  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1404  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1405  default: other_size += ost->data_size; break;
1406  }
1407  extra_size += ost->enc_ctx->extradata_size;
1408  data_size += ost->data_size;
1409  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1410  != AV_CODEC_FLAG_PASS1)
1411  pass1_used = 0;
1412  }
1413 
1414  if (data_size && total_size>0 && total_size >= data_size)
1415  percent = 100.0 * (total_size - data_size) / data_size;
1416 
1417  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1418  video_size / 1024.0,
1419  audio_size / 1024.0,
1420  subtitle_size / 1024.0,
1421  other_size / 1024.0,
1422  extra_size / 1024.0);
1423  if (percent >= 0.0)
1424  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1425  else
1426  av_log(NULL, AV_LOG_INFO, "unknown");
1427  av_log(NULL, AV_LOG_INFO, "\n");
1428 
1429  /* print verbose per-stream stats */
1430  for (i = 0; i < nb_input_files; i++) {
1431  InputFile *f = input_files[i];
1432  uint64_t total_packets = 0, total_size = 0;
1433 
1434  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1435  i, f->ctx->filename);
1436 
1437  for (j = 0; j < f->nb_streams; j++) {
1438  InputStream *ist = input_streams[f->ist_index + j];
1439  enum AVMediaType type = ist->dec_ctx->codec_type;
1440 
1441  total_size += ist->data_size;
1442  total_packets += ist->nb_packets;
1443 
1444  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1445  i, j, media_type_string(type));
1446  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1447  ist->nb_packets, ist->data_size);
1448 
1449  if (ist->decoding_needed) {
1450  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1451  ist->frames_decoded);
1452  if (type == AVMEDIA_TYPE_AUDIO)
1453  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1454  av_log(NULL, AV_LOG_VERBOSE, "; ");
1455  }
1456 
1457  av_log(NULL, AV_LOG_VERBOSE, "\n");
1458  }
1459 
1460  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1461  total_packets, total_size);
1462  }
1463 
1464  for (i = 0; i < nb_output_files; i++) {
1465  OutputFile *of = output_files[i];
1466  uint64_t total_packets = 0, total_size = 0;
1467 
1468  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1469  i, of->ctx->filename);
1470 
1471  for (j = 0; j < of->ctx->nb_streams; j++) {
1472  OutputStream *ost = output_streams[of->ost_index + j];
1473  enum AVMediaType type = ost->enc_ctx->codec_type;
1474 
1475  total_size += ost->data_size;
1476  total_packets += ost->packets_written;
1477 
1478  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1479  i, j, media_type_string(type));
1480  if (ost->encoding_needed) {
1481  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1482  ost->frames_encoded);
1483  if (type == AVMEDIA_TYPE_AUDIO)
1484  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1485  av_log(NULL, AV_LOG_VERBOSE, "; ");
1486  }
1487 
1488  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1489  ost->packets_written, ost->data_size);
1490 
1491  av_log(NULL, AV_LOG_VERBOSE, "\n");
1492  }
1493 
1494  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1495  total_packets, total_size);
1496  }
1497  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1498  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1499  if (pass1_used) {
1500  av_log(NULL, AV_LOG_WARNING, "\n");
1501  } else {
1502  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1503  }
1504  }
1505 }
1506 
1507 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1508 {
1509  char buf[1024];
1510  AVBPrint buf_script;
1511  OutputStream *ost;
1512  AVFormatContext *oc;
1513  int64_t total_size;
1514  AVCodecContext *enc;
1515  int frame_number, vid, i;
1516  double bitrate;
1517  double speed;
1518  int64_t pts = INT64_MIN + 1;
1519  static int64_t last_time = -1;
1520  static int qp_histogram[52];
1521  int hours, mins, secs, us;
1522  int ret;
1523  float t;
1524 
1525  if (!print_stats && !is_last_report && !progress_avio)
1526  return;
1527 
1528  if (!is_last_report) {
1529  if (last_time == -1) {
1530  last_time = cur_time;
1531  return;
1532  }
1533  if ((cur_time - last_time) < 500000)
1534  return;
1535  last_time = cur_time;
1536  }
1537 
1538  t = (cur_time-timer_start) / 1000000.0;
1539 
1540 
1541  oc = output_files[0]->ctx;
1542 
1543  total_size = avio_size(oc->pb);
1544  if (total_size <= 0) // FIXME improve avio_size() so it works with non-seekable output too
1545  total_size = avio_tell(oc->pb);
1546 
1547  buf[0] = '\0';
1548  vid = 0;
1549  av_bprint_init(&buf_script, 0, 1);
1550  for (i = 0; i < nb_output_streams; i++) {
1551  float q = -1;
1552  ost = output_streams[i];
1553  enc = ost->enc_ctx;
1554  if (!ost->stream_copy)
1555  q = ost->quality / (float) FF_QP2LAMBDA;
1556 
1557  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1558  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1559  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1560  ost->file_index, ost->index, q);
1561  }
1562  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1563  float fps;
1564 
1565  frame_number = ost->frame_number;
1566  fps = t > 1 ? frame_number / t : 0;
1567  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1568  frame_number, fps < 9.95, fps, q);
1569  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1570  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1571  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1572  ost->file_index, ost->index, q);
1573  if (is_last_report)
1574  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1575  if (qp_hist) {
1576  int j;
1577  int qp = lrintf(q);
1578  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1579  qp_histogram[qp]++;
1580  for (j = 0; j < 32; j++)
1581  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1582  }
1583 
1584  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1585  int j;
1586  double error, error_sum = 0;
1587  double scale, scale_sum = 0;
1588  double p;
1589  char type[3] = { 'Y','U','V' };
1590  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1591  for (j = 0; j < 3; j++) {
1592  if (is_last_report) {
1593  error = enc->error[j];
1594  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1595  } else {
1596  error = ost->error[j];
1597  scale = enc->width * enc->height * 255.0 * 255.0;
1598  }
1599  if (j)
1600  scale /= 4;
1601  error_sum += error;
1602  scale_sum += scale;
1603  p = psnr(error / scale);
1604  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1605  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1606  ost->file_index, ost->index, type[j] | 32, p);
1607  }
1608  p = psnr(error_sum / scale_sum);
1609  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1610  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1611  ost->file_index, ost->index, p);
1612  }
1613  vid = 1;
1614  }
1615  /* compute min output value */
1616  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1617  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1618  ost->st->time_base, AV_TIME_BASE_Q));
1619  if (is_last_report)
1620  nb_frames_drop += ost->last_dropped;
1621  }
1622 
1623  secs = FFABS(pts) / AV_TIME_BASE;
1624  us = FFABS(pts) % AV_TIME_BASE;
1625  mins = secs / 60;
1626  secs %= 60;
1627  hours = mins / 60;
1628  mins %= 60;
1629 
1630  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1631  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
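/* Units: pts is in AV_TIME_BASE (microsecond) units, so pts / 1000.0 is
 * milliseconds and total_size * 8 / (pts / 1000.0) comes out in kbit/s;
 * e.g. 1 MiB written for 10 s of output is 8388608 bits / 10000 ms, about
 * 838.9 kbit/s. speed is output seconds per wall-clock second. */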
1632 
1633  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1634  "size=N/A time=");
1635  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1636  "size=%8.0fkB time=", total_size / 1024.0);
1637  if (pts < 0)
1638  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1639  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1640  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1641  (100 * us) / AV_TIME_BASE);
1642 
1643  if (bitrate < 0) {
1644  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1645  av_bprintf(&buf_script, "bitrate=N/A\n");
1646  }else{
1647  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1648  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1649  }
1650 
1651  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1652  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1653  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1654  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1655  hours, mins, secs, us);
1656 
1656 
1657  if (nb_frames_dup || nb_frames_drop)
1658  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1659  nb_frames_dup, nb_frames_drop);
1660  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1661  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1662 
1663  if (speed < 0) {
1664  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1665  av_bprintf(&buf_script, "speed=N/A\n");
1666  } else {
1667  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1668  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1669  }
1670 
1671  if (print_stats || is_last_report) {
1672  const char end = is_last_report ? '\n' : '\r';
1673  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1674  fprintf(stderr, "%s %c", buf, end);
1675  } else
1676  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1677 
1678  fflush(stderr);
1679  }
1680 
1681  if (progress_avio) {
1682  av_bprintf(&buf_script, "progress=%s\n",
1683  is_last_report ? "end" : "continue");
1684  avio_write(progress_avio, buf_script.str,
1685  FFMIN(buf_script.len, buf_script.size - 1));
1686  avio_flush(progress_avio);
1687  av_bprint_finalize(&buf_script, NULL);
1688  if (is_last_report) {
1689  if ((ret = avio_closep(&progress_avio)) < 0)
1691  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1692  }
1693  }
1694 
1695  if (is_last_report)
1696  print_final_stats(total_size);
1697 }
1698 
1699 static void flush_encoders(void)
1700 {
1701  int i, ret;
1702 
1703  for (i = 0; i < nb_output_streams; i++) {
1704  OutputStream *ost = output_streams[i];
1705  AVCodecContext *enc = ost->enc_ctx;
1706  AVFormatContext *os = output_files[ost->file_index]->ctx;
1707  int stop_encoding = 0;
1708 
1709  if (!ost->encoding_needed)
1710  continue;
1711 
1712  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1713  continue;
1714 #if FF_API_LAVF_FMT_RAWPICTURE
1715  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1716  continue;
1717 #endif
1718 
1719  for (;;) {
1720  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1721  const char *desc;
1722 
1723  switch (enc->codec_type) {
1724  case AVMEDIA_TYPE_AUDIO:
1725  encode = avcodec_encode_audio2;
1726  desc = "audio";
1727  break;
1728  case AVMEDIA_TYPE_VIDEO:
1729  encode = avcodec_encode_video2;
1730  desc = "video";
1731  break;
1732  default:
1733  stop_encoding = 1;
1734  }
1735 
1736  if (encode) {
1737  AVPacket pkt;
1738  int pkt_size;
1739  int got_packet;
1740  av_init_packet(&pkt);
1741  pkt.data = NULL;
1742  pkt.size = 0;
1743 
1745  ret = encode(enc, &pkt, NULL, &got_packet);
1746  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1747  if (ret < 0) {
1748  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1749  desc,
1750  av_err2str(ret));
1751  exit_program(1);
1752  }
1753  if (ost->logfile && enc->stats_out) {
1754  fprintf(ost->logfile, "%s", enc->stats_out);
1755  }
1756  if (!got_packet) {
1757  stop_encoding = 1;
1758  break;
1759  }
1760  if (ost->finished & MUXER_FINISHED) {
1761  av_packet_unref(&pkt);
1762  continue;
1763  }
1764  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1765  pkt_size = pkt.size;
1766  write_frame(os, &pkt, ost);
1767  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename)
1768  do_video_stats(ost, pkt_size);
1769  }
1770  }
1771 
1772  if (stop_encoding)
1773  break;
1774  }
1775  }
1776 }
1777 
1778 /*
1779  * Check whether a packet from ist should be written into ost at this time
1780  */
1781 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1782 {
1783  OutputFile *of = output_files[ost->file_index];
1784  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1785 
1786  if (ost->source_index != ist_index)
1787  return 0;
1788 
1789  if (ost->finished)
1790  return 0;
1791 
1792  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1793  return 0;
1794 
1795  return 1;
1796 }
1797 
1798 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1799 {
1800  OutputFile *of = output_files[ost->file_index];
1801  InputFile *f = input_files [ist->file_index];
1802  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1803  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1804  AVPicture pict;
1805  AVPacket opkt;
1806 
1807  av_init_packet(&opkt);
1808 
1809  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1810  !ost->copy_initial_nonkeyframes)
1811  return;
1812 
1813  if (!ost->frame_number && !ost->copy_prior_start) {
1814  int64_t comp_start = start_time;
1815  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1816  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1817  if (pkt->pts == AV_NOPTS_VALUE ?
1818  ist->pts < comp_start :
1819  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1820  return;
1821  }
1822 
1823  if (of->recording_time != INT64_MAX &&
1824  ist->pts >= of->recording_time + start_time) {
1825  close_output_stream(ost);
1826  return;
1827  }
1828 
1829  if (f->recording_time != INT64_MAX) {
1830  start_time = f->ctx->start_time;
1831  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1832  start_time += f->start_time;
1833  if (ist->pts >= f->recording_time + start_time) {
1834  close_output_stream(ost);
1835  return;
1836  }
1837  }
1838 
1839  /* force the input stream PTS */
1840  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1841  ost->sync_opts++;
1842 
1843  if (pkt->pts != AV_NOPTS_VALUE)
1844  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1845  else
1846  opkt.pts = AV_NOPTS_VALUE;
1847 
1848  if (pkt->dts == AV_NOPTS_VALUE)
1849  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1850  else
1851  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1852  opkt.dts -= ost_tb_start_time;
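/* Example with assumed numbers: for an output started with -ss 10 and an
 * output time base of 1/90000, ost_tb_start_time is 900000 ticks, so a packet
 * 12 seconds into the input is copied with pts/dts around 180000 ticks,
 * i.e. 2 seconds into the output. */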
1853 
1854  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1855  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1856  if(!duration)
1857  duration = ist->dec_ctx->frame_size;
1858  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1859  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1860  ost->st->time_base) - ost_tb_start_time;
1861  }
1862 
1863  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1864  opkt.flags = pkt->flags;
1865  // FIXME remove the following 2 lines; they shall be replaced by the bitstream filters
1866  if ( ost->st->codec->codec_id != AV_CODEC_ID_H264
1867  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG1VIDEO
1868  && ost->st->codec->codec_id != AV_CODEC_ID_MPEG2VIDEO
1869  && ost->st->codec->codec_id != AV_CODEC_ID_VC1
1870  ) {
1871  int ret = av_parser_change(ost->parser, ost->st->codec,
1872  &opkt.data, &opkt.size,
1873  pkt->data, pkt->size,
1874  pkt->flags & AV_PKT_FLAG_KEY);
1875  if (ret < 0) {
1876  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1877  av_err2str(ret));
1878  exit_program(1);
1879  }
1880  if (ret) {
1881  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1882  if (!opkt.buf)
1883  exit_program(1);
1884  }
1885  } else {
1886  opkt.data = pkt->data;
1887  opkt.size = pkt->size;
1888  }
1889  av_copy_packet_side_data(&opkt, pkt);
1890 
1891 #if FF_API_LAVF_FMT_RAWPICTURE
1892  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1893  ost->st->codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
1894  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1895  /* store AVPicture in AVPacket, as expected by the output format */
1896  int ret = avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1897  if (ret < 0) {
1898  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1899  av_err2str(ret));
1900  exit_program(1);
1901  }
1902  opkt.data = (uint8_t *)&pict;
1903  opkt.size = sizeof(AVPicture);
1904  opkt.flags |= AV_PKT_FLAG_KEY;
1905  }
1906 #endif
1907 
1908  write_frame(of->ctx, &opkt, ost);
1909 }
1910 
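do_streamcopy() above never edits payload timestamps directly: every pts/dts is converted with av_rescale_q() from the input stream's time base to the output stream's time base and then shifted by the rescaled output start time. The following standalone sketch shows the same av_rescale_q() arithmetic with made-up time bases and a made-up -ss offset, not values taken from ffmpeg.c:

    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        AVRational in_tb  = { 1, 90000 };      /* e.g. an MPEG-TS input stream  */
        AVRational out_tb = { 1, 1000 };       /* e.g. a Matroska output stream */
        int64_t pkt_pts   = 900000;            /* 10 s in the input time base   */
        int64_t start_us  = 2 * AV_TIME_BASE;  /* as if -ss 2 on the output     */

        /* same pattern as do_streamcopy(): rescale, then shift by start time */
        int64_t ost_tb_start = av_rescale_q(start_us, AV_TIME_BASE_Q, out_tb);
        int64_t out_pts      = av_rescale_q(pkt_pts, in_tb, out_tb) - ost_tb_start;

        printf("output pts: %"PRId64"\n", out_pts);  /* 10000 - 2000 = 8000 */
        return 0;
    }
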
1911 int guess_input_channel_layout(InputStream *ist)
1912 {
1913  AVCodecContext *dec = ist->dec_ctx;
1914 
1915  if (!dec->channel_layout) {
1916  char layout_name[256];
1917 
1918  if (dec->channels > ist->guess_layout_max)
1919  return 0;
1920  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1921  if (!dec->channel_layout)
1922  return 0;
1923  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1924  dec->channels, dec->channel_layout);
1925  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1926  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1927  }
1928  return 1;
1929 }
1930 
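guess_input_channel_layout() above only fills in a layout when the decoder reported none, deriving a default from the channel count. A small self-contained sketch of that fallback (the channel counts are arbitrary example values):

    #include <libavutil/channel_layout.h>
    #include <stdio.h>

    int main(void)
    {
        for (int channels = 1; channels <= 6; channels++) {
            int64_t layout = av_get_default_channel_layout(channels);
            char name[256];
            av_get_channel_layout_string(name, sizeof(name), channels, layout);
            printf("%d channels -> %s\n", channels, name);
        }
        return 0;
    }
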
1931 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1932 {
1933  if (*got_output || ret<0)
1934  decode_error_stat[ret<0] ++;
1935 
1936  if (ret < 0 && exit_on_error)
1937  exit_program(1);
1938 
1939  if (exit_on_error && *got_output && ist) {
1940  if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
1941  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
1942  exit_program(1);
1943  }
1944  }
1945 }
1946 
1947 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1948 {
1949  AVFrame *decoded_frame, *f;
1950  AVCodecContext *avctx = ist->dec_ctx;
1951  int i, ret, err = 0, resample_changed;
1952  AVRational decoded_frame_tb;
1953 
1954  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1955  return AVERROR(ENOMEM);
1956  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1957  return AVERROR(ENOMEM);
1958  decoded_frame = ist->decoded_frame;
1959 
1960  update_benchmark(NULL);
1961  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1962  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1963 
1964  if (ret >= 0 && avctx->sample_rate <= 0) {
1965  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1966  ret = AVERROR_INVALIDDATA;
1967  }
1968 
1969  check_decode_result(ist, got_output, ret);
1970 
1971  if (!*got_output || ret < 0)
1972  return ret;
1973 
1974  ist->samples_decoded += decoded_frame->nb_samples;
1975  ist->frames_decoded++;
1976 
1977 #if 1
1978  /* increment next_dts to use for the case where the input stream does not
1979  have timestamps or there are multiple frames in the packet */
1980  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1981  avctx->sample_rate;
1982  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1983  avctx->sample_rate;
1984 #endif
1985 
1986  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1987  ist->resample_channels != avctx->channels ||
1988  ist->resample_channel_layout != decoded_frame->channel_layout ||
1989  ist->resample_sample_rate != decoded_frame->sample_rate;
1990  if (resample_changed) {
1991  char layout1[64], layout2[64];
1992 
1993  if (!guess_input_channel_layout(ist)) {
1994  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1995  "layout for Input Stream #%d.%d\n", ist->file_index,
1996  ist->st->index);
1997  exit_program(1);
1998  }
1999  decoded_frame->channel_layout = avctx->channel_layout;
2000 
2001  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2002  ist->resample_channel_layout);
2003  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2004  decoded_frame->channel_layout);
2005 
2006  av_log(NULL, AV_LOG_INFO,
2007  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2008  ist->file_index, ist->st->index,
2009  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2010  ist->resample_channels, layout1,
2011  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2012  avctx->channels, layout2);
2013 
2014  ist->resample_sample_fmt = decoded_frame->format;
2015  ist->resample_sample_rate = decoded_frame->sample_rate;
2016  ist->resample_channel_layout = decoded_frame->channel_layout;
2017  ist->resample_channels = avctx->channels;
2018 
2019  for (i = 0; i < nb_filtergraphs; i++)
2020  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2021  FilterGraph *fg = filtergraphs[i];
2022  if (configure_filtergraph(fg) < 0) {
2023  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2024  exit_program(1);
2025  }
2026  }
2027  }
2028 
2029  /* if the decoder provides a pts, use it instead of the last packet pts.
2030  the decoder could be delaying output by a packet or more. */
2031  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2032  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
2033  decoded_frame_tb = avctx->time_base;
2034  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
2035  decoded_frame->pts = decoded_frame->pkt_pts;
2036  decoded_frame_tb = ist->st->time_base;
2037  } else if (pkt->pts != AV_NOPTS_VALUE) {
2038  decoded_frame->pts = pkt->pts;
2039  decoded_frame_tb = ist->st->time_base;
2040  }else {
2041  decoded_frame->pts = ist->dts;
2042  decoded_frame_tb = AV_TIME_BASE_Q;
2043  }
2044  pkt->pts = AV_NOPTS_VALUE;
2045  if (decoded_frame->pts != AV_NOPTS_VALUE)
2046  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2047  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2048  (AVRational){1, avctx->sample_rate});
2049  ist->nb_samples = decoded_frame->nb_samples;
2050  for (i = 0; i < ist->nb_filters; i++) {
2051  if (i < ist->nb_filters - 1) {
2052  f = ist->filter_frame;
2053  err = av_frame_ref(f, decoded_frame);
2054  if (err < 0)
2055  break;
2056  } else
2057  f = decoded_frame;
2058  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2059  AV_BUFFERSRC_FLAG_PUSH);
2060  if (err == AVERROR_EOF)
2061  err = 0; /* ignore */
2062  if (err < 0)
2063  break;
2064  }
2065  decoded_frame->pts = AV_NOPTS_VALUE;
2066 
2067  av_frame_unref(ist->filter_frame);
2068  av_frame_unref(decoded_frame);
2069  return err < 0 ? err : ret;
2070 }
2071 
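The next_pts/next_dts bump in decode_audio() converts a sample count into AV_TIME_BASE (microsecond) units as AV_TIME_BASE * nb_samples / sample_rate. A tiny worked sketch with example values (1024 samples at 44100 Hz):

    #include <libavutil/avutil.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        int nb_samples  = 1024;
        int sample_rate = 44100;
        int64_t advance = ((int64_t)AV_TIME_BASE * nb_samples) / sample_rate;
        printf("pts/dts advance: %"PRId64" us per frame\n", advance); /* 23219 */
        return 0;
    }
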
2072 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
2073 {
2074  AVFrame *decoded_frame, *f;
2075  int i, ret = 0, err = 0, resample_changed;
2076  int64_t best_effort_timestamp;
2077  AVRational *frame_sample_aspect;
2078 
2079  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2080  return AVERROR(ENOMEM);
2081  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2082  return AVERROR(ENOMEM);
2083  decoded_frame = ist->decoded_frame;
2084  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2085 
2086  update_benchmark(NULL);
2087  ret = avcodec_decode_video2(ist->dec_ctx,
2088  decoded_frame, got_output, pkt);
2089  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2090 
2091  // The following line may be required in some cases where there is no parser
2092  // or the parser does not set has_b_frames correctly
2093  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
2094  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2095  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
2096  } else
2097  av_log(ist->dec_ctx, AV_LOG_WARNING,
2098  "has_b_frames is larger in decoder than demuxer %d > %d.\n"
2099  "If you want to help, upload a sample "
2100  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2101  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2102  ist->dec_ctx->has_b_frames,
2103  ist->st->codec->has_b_frames);
2104  }
2105 
2106  check_decode_result(ist, got_output, ret);
2107 
2108  if (*got_output && ret >= 0) {
2109  if (ist->dec_ctx->width != decoded_frame->width ||
2110  ist->dec_ctx->height != decoded_frame->height ||
2111  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2112  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2113  decoded_frame->width,
2114  decoded_frame->height,
2115  decoded_frame->format,
2116  ist->dec_ctx->width,
2117  ist->dec_ctx->height,
2118  ist->dec_ctx->pix_fmt);
2119  }
2120  }
2121 
2122  if (!*got_output || ret < 0)
2123  return ret;
2124 
2125  if(ist->top_field_first>=0)
2126  decoded_frame->top_field_first = ist->top_field_first;
2127 
2128  ist->frames_decoded++;
2129 
2130  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2131  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2132  if (err < 0)
2133  goto fail;
2134  }
2135  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2136 
2137  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2138  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2139  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2140 
2141  if (ts != AV_NOPTS_VALUE)
2142  ist->next_pts = ist->pts = ts;
2143  }
2144 
2145  if (debug_ts) {
2146  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2147  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2148  ist->st->index, av_ts2str(decoded_frame->pts),
2149  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2150  best_effort_timestamp,
2151  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2152  decoded_frame->key_frame, decoded_frame->pict_type,
2153  ist->st->time_base.num, ist->st->time_base.den);
2154  }
2155 
2156  pkt->size = 0;
2157 
2158  if (ist->st->sample_aspect_ratio.num)
2159  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2160 
2161  resample_changed = ist->resample_width != decoded_frame->width ||
2162  ist->resample_height != decoded_frame->height ||
2163  ist->resample_pix_fmt != decoded_frame->format;
2164  if (resample_changed) {
2165  av_log(NULL, AV_LOG_INFO,
2166  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2167  ist->file_index, ist->st->index,
2168  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2169  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2170 
2171  ist->resample_width = decoded_frame->width;
2172  ist->resample_height = decoded_frame->height;
2173  ist->resample_pix_fmt = decoded_frame->format;
2174 
2175  for (i = 0; i < nb_filtergraphs; i++) {
2176  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2177  configure_filtergraph(filtergraphs[i]) < 0) {
2178  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2179  exit_program(1);
2180  }
2181  }
2182  }
2183 
2184  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2185  for (i = 0; i < ist->nb_filters; i++) {
2186  if (!frame_sample_aspect->num)
2187  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2188 
2189  if (i < ist->nb_filters - 1) {
2190  f = ist->filter_frame;
2191  err = av_frame_ref(f, decoded_frame);
2192  if (err < 0)
2193  break;
2194  } else
2195  f = decoded_frame;
2196  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2197  if (ret == AVERROR_EOF) {
2198  ret = 0; /* ignore */
2199  } else if (ret < 0) {
2200  av_log(NULL, AV_LOG_FATAL,
2201  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2202  exit_program(1);
2203  }
2204  }
2205 
2206 fail:
2207  av_frame_unref(ist->filter_frame);
2208  av_frame_unref(decoded_frame);
2209  return err < 0 ? err : ret;
2210 }
2211 
2212 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2213 {
2214  AVSubtitle subtitle;
2215  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2216  &subtitle, got_output, pkt);
2217 
2218  check_decode_result(NULL, got_output, ret);
2219 
2220  if (ret < 0 || !*got_output) {
2221  if (!pkt->size)
2222  sub2video_flush(ist);
2223  return ret;
2224  }
2225 
2226  if (ist->fix_sub_duration) {
2227  int end = 1;
2228  if (ist->prev_sub.got_output) {
2229  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2230  1000, AV_TIME_BASE);
2231  if (end < ist->prev_sub.subtitle.end_display_time) {
2232  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2233  "Subtitle duration reduced from %d to %d%s\n",
2234  ist->prev_sub.subtitle.end_display_time, end,
2235  end <= 0 ? ", dropping it" : "");
2236  ist->prev_sub.subtitle.end_display_time = end;
2237  }
2238  }
2239  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2240  FFSWAP(int, ret, ist->prev_sub.ret);
2241  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2242  if (end <= 0)
2243  goto out;
2244  }
2245 
2246  if (!*got_output)
2247  return ret;
2248 
2249  sub2video_update(ist, &subtitle);
2250 
2251  if (!subtitle.num_rects)
2252  goto out;
2253 
2254  ist->frames_decoded++;
2255 
2256  for (i = 0; i < nb_output_streams; i++) {
2257  OutputStream *ost = output_streams[i];
2258 
2259  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2260  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2261  continue;
2262 
2263  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2264  }
2265 
2266 out:
2267  avsubtitle_free(&subtitle);
2268  return ret;
2269 }
2270 
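With -fix_sub_duration, transcode_subtitles() clamps the previous subtitle's end_display_time (milliseconds) to the gap to the current subtitle's pts (AV_TIME_BASE units), which is what the av_rescale(..., 1000, AV_TIME_BASE) above computes. A sketch of the same arithmetic with invented timestamps:

    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t prev_pts = 10 * (int64_t)AV_TIME_BASE;          /* subtitle at 10 s   */
        int64_t cur_pts  = 11 * (int64_t)AV_TIME_BASE + 500000; /* next one at 11.5 s */
        int end_display_time = 3000;                            /* claimed 3000 ms    */

        int end = av_rescale(cur_pts - prev_pts, 1000, AV_TIME_BASE);
        if (end < end_display_time)
            end_display_time = end;                             /* reduced to 1500 ms */

        printf("end_display_time: %d ms\n", end_display_time);
        return 0;
    }
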
2271 static int send_filter_eof(InputStream *ist)
2272 {
2273  int i, ret;
2274  for (i = 0; i < ist->nb_filters; i++) {
2275  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2276  if (ret < 0)
2277  return ret;
2278  }
2279  return 0;
2280 }
2281 
2282 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2283 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2284 {
2285  int ret = 0, i;
2286  int got_output = 0;
2287 
2288  AVPacket avpkt;
2289  if (!ist->saw_first_ts) {
2290  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2291  ist->pts = 0;
2292  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2293  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2294  ist->pts = ist->dts; // unused, but better to set it to a value that's not totally wrong
2295  }
2296  ist->saw_first_ts = 1;
2297  }
2298 
2299  if (ist->next_dts == AV_NOPTS_VALUE)
2300  ist->next_dts = ist->dts;
2301  if (ist->next_pts == AV_NOPTS_VALUE)
2302  ist->next_pts = ist->pts;
2303 
2304  if (!pkt) {
2305  /* EOF handling */
2306  av_init_packet(&avpkt);
2307  avpkt.data = NULL;
2308  avpkt.size = 0;
2309  goto handle_eof;
2310  } else {
2311  avpkt = *pkt;
2312  }
2313 
2314  if (pkt->dts != AV_NOPTS_VALUE) {
2315  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2316  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2317  ist->next_pts = ist->pts = ist->dts;
2318  }
2319 
2320  // while we have more to decode or while the decoder did output something on EOF
2321  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2322  int duration;
2323  handle_eof:
2324 
2325  ist->pts = ist->next_pts;
2326  ist->dts = ist->next_dts;
2327 
2328  if (avpkt.size && avpkt.size != pkt->size &&
2329  !(ist->dec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
2330  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2331  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2332  ist->showed_multi_packet_warning = 1;
2333  }
2334 
2335  switch (ist->dec_ctx->codec_type) {
2336  case AVMEDIA_TYPE_AUDIO:
2337  ret = decode_audio (ist, &avpkt, &got_output);
2338  break;
2339  case AVMEDIA_TYPE_VIDEO:
2340  ret = decode_video (ist, &avpkt, &got_output);
2341  if (avpkt.duration) {
2342  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2343  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2344  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2345  duration = ((int64_t)AV_TIME_BASE *
2346  ist->dec_ctx->framerate.den * ticks) /
2347  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2348  } else
2349  duration = 0;
2350 
2351  if(ist->dts != AV_NOPTS_VALUE && duration) {
2352  ist->next_dts += duration;
2353  }else
2354  ist->next_dts = AV_NOPTS_VALUE;
2355 
2356  if (got_output)
2357  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2358  break;
2359  case AVMEDIA_TYPE_SUBTITLE:
2360  ret = transcode_subtitles(ist, &avpkt, &got_output);
2361  break;
2362  default:
2363  return -1;
2364  }
2365 
2366  if (ret < 0) {
2367  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2368  ist->file_index, ist->st->index, av_err2str(ret));
2369  if (exit_on_error)
2370  exit_program(1);
2371  break;
2372  }
2373 
2374  avpkt.dts=
2375  avpkt.pts= AV_NOPTS_VALUE;
2376 
2377  // touch data and size only if not EOF
2378  if (pkt) {
2379  if(ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2380  ret = avpkt.size;
2381  avpkt.data += ret;
2382  avpkt.size -= ret;
2383  }
2384  if (!got_output) {
2385  continue;
2386  }
2387  if (got_output && !pkt)
2388  break;
2389  }
2390 
2391  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2392  /* except when looping we need to flush but not to send an EOF */
2393  if (!pkt && ist->decoding_needed && !got_output && !no_eof) {
2394  int ret = send_filter_eof(ist);
2395  if (ret < 0) {
2396  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2397  exit_program(1);
2398  }
2399  }
2400 
2401  /* handle stream copy */
2402  if (!ist->decoding_needed) {
2403  ist->dts = ist->next_dts;
2404  switch (ist->dec_ctx->codec_type) {
2405  case AVMEDIA_TYPE_AUDIO:
2406  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2407  ist->dec_ctx->sample_rate;
2408  break;
2409  case AVMEDIA_TYPE_VIDEO:
2410  if (ist->framerate.num) {
2411  // TODO: Remove work-around for c99-to-c89 issue 7
2412  AVRational time_base_q = AV_TIME_BASE_Q;
2413  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2414  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2415  } else if (pkt->duration) {
2416  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2417  } else if(ist->dec_ctx->framerate.num != 0) {
2418  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2419  ist->next_dts += ((int64_t)AV_TIME_BASE *
2420  ist->dec_ctx->framerate.den * ticks) /
2421  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2422  }
2423  break;
2424  }
2425  ist->pts = ist->dts;
2426  ist->next_pts = ist->next_dts;
2427  }
2428  for (i = 0; pkt && i < nb_output_streams; i++) {
2429  OutputStream *ost = output_streams[i];
2430 
2431  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2432  continue;
2433 
2434  do_streamcopy(ist, ost, pkt);
2435  }
2436 
2437  return got_output;
2438 }
2439 
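As the comment before process_input_packet() says, pkt == NULL means EOF, and the decoder is drained by repeatedly decoding an empty packet until got_output stays 0. A generic sketch of that drain pattern for the avcodec_decode_audio4()-era API used above; dec_ctx is assumed to be an already-opened audio decoder, and the helper name and frame counting are illustrative only:

    #include <libavcodec/avcodec.h>

    int drain_audio_decoder(AVCodecContext *dec_ctx)
    {
        AVFrame *frame = av_frame_alloc();
        AVPacket pkt;
        int got_output = 1, nb_flushed = 0, ret = 0;

        if (!frame)
            return AVERROR(ENOMEM);

        av_init_packet(&pkt);
        pkt.data = NULL;    /* data == NULL, size == 0 signals "flush" */
        pkt.size = 0;

        while (got_output) {
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_output, &pkt);
            if (ret < 0)
                break;
            av_frame_unref(frame);
            if (got_output)
                nb_flushed++;    /* real code would forward the frame instead */
        }
        av_frame_free(&frame);
        return ret < 0 ? ret : nb_flushed;
    }
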
2440 static void print_sdp(void)
2441 {
2442  char sdp[16384];
2443  int i;
2444  int j;
2445  AVIOContext *sdp_pb;
2446  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2447 
2448  if (!avc)
2449  exit_program(1);
2450  for (i = 0, j = 0; i < nb_output_files; i++) {
2451  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2452  avc[j] = output_files[i]->ctx;
2453  j++;
2454  }
2455  }
2456 
2457  if (!j)
2458  goto fail;
2459 
2460  av_sdp_create(avc, j, sdp, sizeof(sdp));
2461 
2462  if (!sdp_filename) {
2463  printf("SDP:\n%s\n", sdp);
2464  fflush(stdout);
2465  } else {
2466  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2467  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2468  } else {
2469  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2470  avio_closep(&sdp_pb);
2471  av_freep(&sdp_filename);
2472  }
2473  }
2474 
2475 fail:
2476  av_freep(&avc);
2477 }
2478 
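print_sdp() collects every output context whose muxer is "rtp" and passes them to av_sdp_create() in a single call, so one SDP describes all RTP outputs together. A reduced sketch of that core call, assuming the caller already holds the opened RTP muxer contexts (the helper name is illustrative):

    #include <libavformat/avformat.h>
    #include <stdio.h>

    int dump_sdp(AVFormatContext **rtp_ctxs, int nb_rtp)
    {
        char sdp[16384];
        int ret = av_sdp_create(rtp_ctxs, nb_rtp, sdp, sizeof(sdp));
        if (ret < 0)
            return ret;
        printf("SDP:\n%s\n", sdp);
        return 0;
    }
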
2479 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2480 {
2481  int i;
2482  for (i = 0; hwaccels[i].name; i++)
2483  if (hwaccels[i].pix_fmt == pix_fmt)
2484  return &hwaccels[i];
2485  return NULL;
2486 }
2487 
2488 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2489 {
2490  InputStream *ist = s->opaque;
2491  const enum AVPixelFormat *p;
2492  int ret;
2493 
2494  for (p = pix_fmts; *p != -1; p++) {
2495  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2496  const HWAccel *hwaccel;
2497 
2498  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2499  break;
2500 
2501  hwaccel = get_hwaccel(*p);
2502  if (!hwaccel ||
2503  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2504  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2505  continue;
2506 
2507  ret = hwaccel->init(s);
2508  if (ret < 0) {
2509  if (ist->hwaccel_id == hwaccel->id) {
2510  av_log(NULL, AV_LOG_FATAL,
2511  "%s hwaccel requested for input stream #%d:%d, "
2512  "but cannot be initialized.\n", hwaccel->name,
2513  ist->file_index, ist->st->index);
2514  return AV_PIX_FMT_NONE;
2515  }
2516  continue;
2517  }
2518  ist->active_hwaccel_id = hwaccel->id;
2519  ist->hwaccel_pix_fmt = *p;
2520  break;
2521  }
2522 
2523  return *p;
2524 }
2525 
2526 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2527 {
2528  InputStream *ist = s->opaque;
2529 
2530  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2531  return ist->hwaccel_get_buffer(s, frame, flags);
2532 
2533  return avcodec_default_get_buffer2(s, frame, flags);
2534 }
2535 
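get_format() and get_buffer() above are not called directly; init_input_stream() below installs them as callbacks on the decoder context and stores the InputStream in dec_ctx->opaque so they can reach it. A minimal sketch of that wiring, with placeholder callback bodies that simply defer to libavcodec's defaults rather than reimplementing the hwaccel selection shown above (the function names are illustrative):

    #include <libavcodec/avcodec.h>

    static enum AVPixelFormat my_get_format(AVCodecContext *s,
                                            const enum AVPixelFormat *pix_fmts)
    {
        /* a real implementation would walk pix_fmts looking for a usable
         * hardware format, as get_format() above does */
        return avcodec_default_get_format(s, pix_fmts);
    }

    static int my_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
    {
        /* non-hwaccel path: let libavcodec allocate the buffers */
        return avcodec_default_get_buffer2(s, frame, flags);
    }

    void install_decoder_callbacks(AVCodecContext *dec_ctx, void *user_data)
    {
        dec_ctx->opaque      = user_data;      /* reachable from the callbacks */
        dec_ctx->get_format  = my_get_format;
        dec_ctx->get_buffer2 = my_get_buffer;
    }
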
2536 static int init_input_stream(int ist_index, char *error, int error_len)
2537 {
2538  int ret;
2539  InputStream *ist = input_streams[ist_index];
2540 
2541  if (ist->decoding_needed) {
2542  AVCodec *codec = ist->dec;
2543  if (!codec) {
2544  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2545  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2546  return AVERROR(EINVAL);
2547  }
2548 
2549  ist->dec_ctx->opaque = ist;
2550  ist->dec_ctx->get_format = get_format;
2551  ist->dec_ctx->get_buffer2 = get_buffer;
2552  ist->dec_ctx->thread_safe_callbacks = 1;
2553 
2554  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2555  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2556  (ist->decoding_needed & DECODING_FOR_OST)) {
2557  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2558  if (ist->decoding_needed & DECODING_FOR_FILTER)
2559  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2560  }
2561 
2562  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2563 
2564  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2565  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2566  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2567  if (ret == AVERROR_EXPERIMENTAL)
2568  abort_codec_experimental(codec, 0);
2569 
2570  snprintf(error, error_len,
2571  "Error while opening decoder for input stream "
2572  "#%d:%d : %s",
2573  ist->file_index, ist->st->index, av_err2str(ret));
2574  return ret;
2575  }
2576  assert_avoptions(ist->decoder_opts);
2577  }
2578 
2579  ist->next_pts = AV_NOPTS_VALUE;
2580  ist->next_dts = AV_NOPTS_VALUE;
2581 
2582  return 0;
2583 }
2584 
2585 static InputStream *get_input_stream(OutputStream *ost)
2586 {
2587  if (ost->source_index >= 0)
2588  return input_streams[ost->source_index];
2589  return NULL;
2590 }
2591 
2592 static int compare_int64(const void *a, const void *b)
2593 {
2594  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2595 }
2596 
2597 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2598 {
2599  int ret = 0;
2600 
2601  if (ost->encoding_needed) {
2602  AVCodec *codec = ost->enc;
2603  AVCodecContext *dec = NULL;
2604  InputStream *ist;
2605 
2606  if ((ist = get_input_stream(ost)))
2607  dec = ist->dec_ctx;
2608  if (dec && dec->subtitle_header) {
2609  /* ASS code assumes this buffer is null terminated so add extra byte. */
2610  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
2611  if (!ost->enc_ctx->subtitle_header)
2612  return AVERROR(ENOMEM);
2613  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
2614  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
2615  }
2616  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
2617  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
2618  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2619  !codec->defaults &&
2620  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
2621  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
2622  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
2623 
2624  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
2625  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
2626  if (!ost->enc_ctx->hw_frames_ctx)
2627  return AVERROR(ENOMEM);
2628  }
2629 
2630  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
2631  if (ret == AVERROR_EXPERIMENTAL)
2632  abort_codec_experimental(codec, 1);
2633  snprintf(error, error_len,
2634  "Error while opening encoder for output stream #%d:%d - "
2635  "maybe incorrect parameters such as bit_rate, rate, width or height",
2636  ost->file_index, ost->index);
2637  return ret;
2638  }
2639  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
2640  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
2641  av_buffersink_set_frame_size(ost->filter->filter,
2642  ost->enc_ctx->frame_size);
2643  assert_avoptions(ost->encoder_opts);
2644  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
2645  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
2646  " It takes bits/s as argument, not kbits/s\n");
2647 
2648  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
2649  if (ret < 0) {
2650  av_log(NULL, AV_LOG_FATAL,
2651  "Error initializing the output stream codec context.\n");
2652  exit_program(1);
2653  }
2654 
2655  if (ost->enc_ctx->nb_coded_side_data) {
2656  int i;
2657 
2658  ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
2659  sizeof(*ost->st->side_data));
2660  if (!ost->st->side_data)
2661  return AVERROR(ENOMEM);
2662 
2663  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
2664  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
2665  AVPacketSideData *sd_dst = &ost->st->side_data[i];
2666 
2667  sd_dst->data = av_malloc(sd_src->size);
2668  if (!sd_dst->data)
2669  return AVERROR(ENOMEM);
2670  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2671  sd_dst->size = sd_src->size;
2672  sd_dst->type = sd_src->type;
2673  ost->st->nb_side_data++;
2674  }
2675  }
2676 
2677  // copy timebase while removing common factors
2678  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
2679  ost->st->codec->codec= ost->enc_ctx->codec;
2680  } else {
2681  ret = av_opt_set_dict(ost->st->codec, &ost->encoder_opts);
2682  if (ret < 0) {
2683  av_log(NULL, AV_LOG_FATAL,
2684  "Error setting up codec context options.\n");
2685  return ret;
2686  }
2687  // copy timebase while removing common factors
2688  ost->st->time_base = av_add_q(ost->st->codec->time_base, (AVRational){0, 1});
2689  }
2690 
2691  return ret;
2692 }
2693 
2694 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2695  AVCodecContext *avctx)
2696 {
2697  char *p;
2698  int n = 1, i, size, index = 0;
2699  int64_t t, *pts;
2700 
2701  for (p = kf; *p; p++)
2702  if (*p == ',')
2703  n++;
2704  size = n;
2705  pts = av_malloc_array(size, sizeof(*pts));
2706  if (!pts) {
2707  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2708  exit_program(1);
2709  }
2710 
2711  p = kf;
2712  for (i = 0; i < n; i++) {
2713  char *next = strchr(p, ',');
2714 
2715  if (next)
2716  *next++ = 0;
2717 
2718  if (!memcmp(p, "chapters", 8)) {
2719 
2720  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2721  int j;
2722 
2723  if (avf->nb_chapters > INT_MAX - size ||
2724  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2725  sizeof(*pts)))) {
2726  av_log(NULL, AV_LOG_FATAL,
2727  "Could not allocate forced key frames array.\n");
2728  exit_program(1);
2729  }
2730  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2731  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2732 
2733  for (j = 0; j < avf->nb_chapters; j++) {
2734  AVChapter *c = avf->chapters[j];
2735  av_assert1(index < size);
2736  pts[index++] = av_rescale_q(c->start, c->time_base,
2737  avctx->time_base) + t;
2738  }
2739 
2740  } else {
2741 
2742  t = parse_time_or_die("force_key_frames", p, 1);
2743  av_assert1(index < size);
2744  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2745 
2746  }
2747 
2748  p = next;
2749  }
2750 
2751  av_assert0(index == size);
2752  qsort(pts, size, sizeof(*pts), compare_int64);
2753  ost->forced_kf_count = size;
2754  ost->forced_kf_pts = pts;
2755 }
2756 
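For the "chapters[+offset]" form, parse_forced_key_frames() turns each chapter start into a keyframe time in the encoder time base with av_rescale_q() and adds the rescaled offset. A worked sketch with invented chapter data and a 1/25 encoder time base:

    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        AVRational chapter_tb = { 1, 1000 };            /* chapter starts in ms  */
        AVRational enc_tb     = { 1, 25 };              /* 25 fps encoder ticks  */
        int64_t chapter_start[] = { 0, 60000, 150000 }; /* 0 s, 60 s, 150 s      */
        int64_t offset_us = AV_TIME_BASE / 10;          /* as if "chapters+0.1"  */

        int64_t t = av_rescale_q(offset_us, AV_TIME_BASE_Q, enc_tb);
        for (int i = 0; i < 3; i++) {
            int64_t kf = av_rescale_q(chapter_start[i], chapter_tb, enc_tb) + t;
            printf("forced keyframe at %"PRId64" (encoder time base ticks)\n", kf);
        }
        return 0;
    }
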
2757 static void report_new_stream(int input_index, AVPacket *pkt)
2758 {
2759  InputFile *file = input_files[input_index];
2760  AVStream *st = file->ctx->streams[pkt->stream_index];
2761 
2762  if (pkt->stream_index < file->nb_streams_warn)
2763  return;
2764  av_log(file->ctx, AV_LOG_WARNING,
2765  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2766  av_get_media_type_string(st->codec->codec_type),
2767  input_index, pkt->stream_index,
2768  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2769  file->nb_streams_warn = pkt->stream_index + 1;
2770 }
2771 
2772 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2773 {
2774  AVDictionaryEntry *e;
2775 
2776  uint8_t *encoder_string;
2777  int encoder_string_len;
2778  int format_flags = 0;
2779  int codec_flags = 0;
2780 
2781  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2782  return;
2783 
2784  e = av_dict_get(of->opts, "fflags", NULL, 0);
2785  if (e) {
2786  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2787  if (!o)
2788  return;
2789  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2790  }
2791  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2792  if (e) {
2793  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2794  if (!o)
2795  return;
2796  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2797  }
2798 
2799  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2800  encoder_string = av_mallocz(encoder_string_len);
2801  if (!encoder_string)
2802  exit_program(1);
2803 
2804  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2805  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2806  else
2807  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2808  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2809  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2810  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2811 }
2812 
2813 static int transcode_init(void)
2814 {
2815  int ret = 0, i, j, k;
2816  AVFormatContext *oc;
2817  OutputStream *ost;
2818  InputStream *ist;
2819  char error[1024] = {0};
2820  int want_sdp = 1;
2821 
2822  for (i = 0; i < nb_filtergraphs; i++) {
2823  FilterGraph *fg = filtergraphs[i];
2824  for (j = 0; j < fg->nb_outputs; j++) {
2825  OutputFilter *ofilter = fg->outputs[j];
2826  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2827  continue;
2828  if (fg->nb_inputs != 1)
2829  continue;
2830  for (k = nb_input_streams-1; k >= 0 ; k--)
2831  if (fg->inputs[0]->ist == input_streams[k])
2832  break;
2833  ofilter->ost->source_index = k;
2834  }
2835  }
2836 
2837  /* init framerate emulation */
2838  for (i = 0; i < nb_input_files; i++) {
2839  InputFile *ifile = input_files[i];
2840  if (ifile->rate_emu)
2841  for (j = 0; j < ifile->nb_streams; j++)
2842  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2843  }
2844 
2845  /* for each output stream, we compute the right encoding parameters */
2846  for (i = 0; i < nb_output_streams; i++) {
2847  AVCodecContext *enc_ctx;
2848  AVCodecContext *dec_ctx = NULL;
2849  ost = output_streams[i];
2850  oc = output_files[ost->file_index]->ctx;
2851  ist = get_input_stream(ost);
2852 
2853  if (ost->attachment_filename)
2854  continue;
2855 
2856  enc_ctx = ost->stream_copy ? ost->st->codec : ost->enc_ctx;
2857 
2858  if (ist) {
2859  dec_ctx = ist->dec_ctx;
2860 
2861  ost->st->disposition = ist->st->disposition;
2862  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2863  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2864  } else {
2865  for (j=0; j<oc->nb_streams; j++) {
2866  AVStream *st = oc->streams[j];
2867  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2868  break;
2869  }
2870  if (j == oc->nb_streams)
2871  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2872  ost->st->disposition = AV_DISPOSITION_DEFAULT;
2873  }
2874 
2875  if (ost->stream_copy) {
2876  AVRational sar;
2877  uint64_t extra_size;
2878 
2879  av_assert0(ist && !ost->filter);
2880 
2881  extra_size = (uint64_t)dec_ctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2882 
2883  if (extra_size > INT_MAX) {
2884  return AVERROR(EINVAL);
2885  }
2886 
2887  /* if stream_copy is selected, no need to decode or encode */
2888  enc_ctx->codec_id = dec_ctx->codec_id;
2889  enc_ctx->codec_type = dec_ctx->codec_type;
2890 
2891  if (!enc_ctx->codec_tag) {
2892  unsigned int codec_tag;
2893  if (!oc->oformat->codec_tag ||
2894  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2895  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2896  enc_ctx->codec_tag = dec_ctx->codec_tag;
2897  }
2898 
2899  enc_ctx->bit_rate = dec_ctx->bit_rate;
2900  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2901  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2902  enc_ctx->field_order = dec_ctx->field_order;
2903  if (dec_ctx->extradata_size) {
2904  enc_ctx->extradata = av_mallocz(extra_size);
2905  if (!enc_ctx->extradata) {
2906  return AVERROR(ENOMEM);
2907  }
2908  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2909  }
2910  enc_ctx->extradata_size= dec_ctx->extradata_size;
2911  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2912 
2913  enc_ctx->time_base = ist->st->time_base;
2914  /*
2915  * Avi is a special case here because it supports variable fps but
2916  * having the fps and timebase differ significantly adds quite some
2917  * overhead
2918  */
2919  if(!strcmp(oc->oformat->name, "avi")) {
2920  if ( copy_tb<0 && ist->st->r_frame_rate.num
2921  && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2922  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2923  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2924  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2925  || copy_tb==2){
2926  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2927  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2928  enc_ctx->ticks_per_frame = 2;
2929  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2930  && av_q2d(ist->st->time_base) < 1.0/500
2931  || copy_tb==0){
2932  enc_ctx->time_base = dec_ctx->time_base;
2933  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2934  enc_ctx->time_base.den *= 2;
2935  enc_ctx->ticks_per_frame = 2;
2936  }
2937  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2938  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2939  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2940  && strcmp(oc->oformat->name, "f4v")
2941  ) {
2942  if( copy_tb<0 && dec_ctx->time_base.den
2943  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2944  && av_q2d(ist->st->time_base) < 1.0/500
2945  || copy_tb==0){
2946  enc_ctx->time_base = dec_ctx->time_base;
2947  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2948  }
2949  }
2950  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2951  && dec_ctx->time_base.num < dec_ctx->time_base.den
2952  && dec_ctx->time_base.num > 0
2953  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2954  enc_ctx->time_base = dec_ctx->time_base;
2955  }
2956 
2957  if (!ost->frame_rate.num)
2958  ost->frame_rate = ist->framerate;
2959  if(ost->frame_rate.num)
2960  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2961 
2962  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2963  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2964 
2965  if (ist->st->nb_side_data) {
2966  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2967  sizeof(*ist->st->side_data));
2968  if (!ost->st->side_data)
2969  return AVERROR(ENOMEM);
2970 
2971  ost->st->nb_side_data = 0;
2972  for (j = 0; j < ist->st->nb_side_data; j++) {
2973  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2974  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2975 
2976  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2977  continue;
2978 
2979  sd_dst->data = av_malloc(sd_src->size);
2980  if (!sd_dst->data)
2981  return AVERROR(ENOMEM);
2982  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2983  sd_dst->size = sd_src->size;
2984  sd_dst->type = sd_src->type;
2985  ost->st->nb_side_data++;
2986  }
2987  }
2988 
2989  ost->parser = av_parser_init(enc_ctx->codec_id);
2990 
2991  switch (enc_ctx->codec_type) {
2992  case AVMEDIA_TYPE_AUDIO:
2993  if (audio_volume != 256) {
2994  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2995  exit_program(1);
2996  }
2997  enc_ctx->channel_layout = dec_ctx->channel_layout;
2998  enc_ctx->sample_rate = dec_ctx->sample_rate;
2999  enc_ctx->channels = dec_ctx->channels;
3000  enc_ctx->frame_size = dec_ctx->frame_size;
3001  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
3002  enc_ctx->block_align = dec_ctx->block_align;
3003  enc_ctx->initial_padding = dec_ctx->delay;
3004  enc_ctx->profile = dec_ctx->profile;
3005 #if FF_API_AUDIOENC_DELAY
3006  enc_ctx->delay = dec_ctx->delay;
3007 #endif
3008  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
3009  enc_ctx->block_align= 0;
3010  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
3011  enc_ctx->block_align= 0;
3012  break;
3013  case AVMEDIA_TYPE_VIDEO:
3014  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
3015  enc_ctx->colorspace = dec_ctx->colorspace;
3016  enc_ctx->color_range = dec_ctx->color_range;
3017  enc_ctx->color_primaries = dec_ctx->color_primaries;
3018  enc_ctx->color_trc = dec_ctx->color_trc;
3019  enc_ctx->width = dec_ctx->width;
3020  enc_ctx->height = dec_ctx->height;
3021  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
3022  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3023  sar =
3024  av_mul_q(ost->frame_aspect_ratio,
3025  (AVRational){ enc_ctx->height, enc_ctx->width });
3026  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3027  "with stream copy may produce invalid files\n");
3028  }
3029  else if (ist->st->sample_aspect_ratio.num)
3030  sar = ist->st->sample_aspect_ratio;
3031  else
3032  sar = dec_ctx->sample_aspect_ratio;
3033  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
3034  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3035  ost->st->r_frame_rate = ist->st->r_frame_rate;
3036  break;
3037  case AVMEDIA_TYPE_SUBTITLE:
3038  enc_ctx->width = dec_ctx->width;
3039  enc_ctx->height = dec_ctx->height;
3040  break;
3041  case AVMEDIA_TYPE_UNKNOWN:
3042  case AVMEDIA_TYPE_DATA:
3043  case AVMEDIA_TYPE_ATTACHMENT:
3044  break;
3045  default:
3046  abort();
3047  }
3048  } else {
3049  if (!ost->enc)
3050  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
3051  if (!ost->enc) {
3052  /* should only happen when a default codec is not present. */
3053  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
3054  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
3055  ret = AVERROR(EINVAL);
3056  goto dump_format;
3057  }
3058 
3059  set_encoder_id(output_files[ost->file_index], ost);
3060 
3061 #if CONFIG_LIBMFX
3062  if (qsv_transcode_init(ost))
3063  exit_program(1);
3064 #endif
3065 
3066 #if CONFIG_CUVID
3067  if (cuvid_transcode_init(ost))
3068  exit_program(1);
3069 #endif
3070 
3071  if (!ost->filter &&
3072  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3073  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
3074  FilterGraph *fg;
3075  fg = init_simple_filtergraph(ist, ost);
3076  if (configure_filtergraph(fg)) {
3077  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3078  exit_program(1);
3079  }
3080  }
3081 
3082  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3083  if (!ost->frame_rate.num)
3084  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3085  if (ist && !ost->frame_rate.num)
3086  ost->frame_rate = ist->framerate;
3087  if (ist && !ost->frame_rate.num)
3088  ost->frame_rate = ist->st->r_frame_rate;
3089  if (ist && !ost->frame_rate.num) {
3090  ost->frame_rate = (AVRational){25, 1};
3091  av_log(NULL, AV_LOG_WARNING,
3092  "No information "
3093  "about the input framerate is available. Falling "
3094  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3095  "if you want a different framerate.\n",
3096  ost->file_index, ost->index);
3097  }
3098 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3099  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3100  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3101  ost->frame_rate = ost->enc->supported_framerates[idx];
3102  }
3103  // reduce frame rate for mpeg4 to be within the spec limits
3104  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3105  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3106  ost->frame_rate.num, ost->frame_rate.den, 65535);
3107  }
3108  }
3109 
3110  switch (enc_ctx->codec_type) {
3111  case AVMEDIA_TYPE_AUDIO:
3112  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3113  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3114  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3115  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3116  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3117  break;
3118  case AVMEDIA_TYPE_VIDEO:
3119  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3120  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3121  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3122  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3123  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
3124  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3125  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3126  }
3127  for (j = 0; j < ost->forced_kf_count; j++)
3128  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3129  AV_TIME_BASE_Q,
3130  enc_ctx->time_base);
3131 
3132  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3133  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3134  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3135  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3136  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3137  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3138  if (!strncmp(ost->enc->name, "libx264", 7) &&
3139  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3140  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3141  av_log(NULL, AV_LOG_WARNING,
3142  "No pixel format specified, %s for H.264 encoding chosen.\n"
3143  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3144  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3145  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3146  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3147  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3148  av_log(NULL, AV_LOG_WARNING,
3149  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3150  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3151  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3152  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3153 
3154  ost->st->avg_frame_rate = ost->frame_rate;
3155 
3156  if (!dec_ctx ||
3157  enc_ctx->width != dec_ctx->width ||
3158  enc_ctx->height != dec_ctx->height ||
3159  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3160  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3161  }
3162 
3163  if (ost->forced_keyframes) {
3164  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3165  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3166  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3167  if (ret < 0) {
3168  av_log(NULL, AV_LOG_ERROR,
3169  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3170  return ret;
3171  }
3172  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3173  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3174  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3175  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3176 
3177  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3178  // parse it only for static kf timings
3179  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3180  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3181  }
3182  }
3183  break;
3184  case AVMEDIA_TYPE_SUBTITLE:
3185  enc_ctx->time_base = (AVRational){1, 1000};
3186  if (!enc_ctx->width) {
3187  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
3188  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
3189  }
3190  break;
3191  case AVMEDIA_TYPE_DATA:
3192  break;
3193  default:
3194  abort();
3195  break;
3196  }
3197  }
3198 
3199  if (ost->disposition) {
3200  static const AVOption opts[] = {
3201  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3202  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3203  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3204  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3205  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3206  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3207  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3208  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3209  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3210  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3211  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3212  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3213  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3214  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3215  { NULL },
3216  };
3217  static const AVClass class = {
3218  .class_name = "",
3219  .item_name = av_default_item_name,
3220  .option = opts,
3221  .version = LIBAVUTIL_VERSION_INT,
3222  };
3223  const AVClass *pclass = &class;
3224 
3225  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3226  if (ret < 0)
3227  goto dump_format;
3228  }
3229  }
3230 
3231  /* init input streams */
3232  for (i = 0; i < nb_input_streams; i++)
3233  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3234  for (i = 0; i < nb_output_streams; i++) {
3235  ost = output_streams[i];
3236  avcodec_close(ost->enc_ctx);
3237  }
3238  goto dump_format;
3239  }
3240 
3241  /* open each encoder */
3242  for (i = 0; i < nb_output_streams; i++) {
3243  ret = init_output_stream(output_streams[i], error, sizeof(error));
3244  if (ret < 0)
3245  goto dump_format;
3246  }
3247 
3248  /* discard unused programs */
3249  for (i = 0; i < nb_input_files; i++) {
3250  InputFile *ifile = input_files[i];
3251  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3252  AVProgram *p = ifile->ctx->programs[j];
3253  int discard = AVDISCARD_ALL;
3254 
3255  for (k = 0; k < p->nb_stream_indexes; k++)
3256  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3257  discard = AVDISCARD_DEFAULT;
3258  break;
3259  }
3260  p->discard = discard;
3261  }
3262  }
3263 
3264  /* open files and write file headers */
3265  for (i = 0; i < nb_output_files; i++) {
3266  oc = output_files[i]->ctx;
3267  oc->interrupt_callback = int_cb;
3268  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3269  snprintf(error, sizeof(error),
3270  "Could not write header for output file #%d "
3271  "(incorrect codec parameters ?): %s",
3272  i, av_err2str(ret));
3273  ret = AVERROR(EINVAL);
3274  goto dump_format;
3275  }
3276 // assert_avoptions(output_files[i]->opts);
3277  if (strcmp(oc->oformat->name, "rtp")) {
3278  want_sdp = 0;
3279  }
3280  }
3281 
3282  dump_format:
3283  /* dump the file output parameters - cannot be done before in case
3284  of stream copy */
3285  for (i = 0; i < nb_output_files; i++) {
3286  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3287  }
3288 
3289  /* dump the stream mapping */
3290  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3291  for (i = 0; i < nb_input_streams; i++) {
3292  ist = input_streams[i];
3293 
3294  for (j = 0; j < ist->nb_filters; j++) {
3295  if (ist->filters[j]->graph->graph_desc) {
3296  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3297  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3298  ist->filters[j]->name);
3299  if (nb_filtergraphs > 1)
3300  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3301  av_log(NULL, AV_LOG_INFO, "\n");
3302  }
3303  }
3304  }
3305 
3306  for (i = 0; i < nb_output_streams; i++) {
3307  ost = output_streams[i];
3308 
3309  if (ost->attachment_filename) {
3310  /* an attached file */
3311  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3312  ost->attachment_filename, ost->file_index, ost->index);
3313  continue;
3314  }
3315 
3316  if (ost->filter && ost->filter->graph->graph_desc) {
3317  /* output from a complex graph */
3318  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3319  if (nb_filtergraphs > 1)
3320  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3321 
3322  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3323  ost->index, ost->enc ? ost->enc->name : "?");
3324  continue;
3325  }
3326 
3327  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3328  input_streams[ost->source_index]->file_index,
3329  input_streams[ost->source_index]->st->index,
3330  ost->file_index,
3331  ost->index);
3332  if (ost->sync_ist != input_streams[ost->source_index])
3333  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3334  ost->sync_ist->file_index,
3335  ost->sync_ist->st->index);
3336  if (ost->stream_copy)
3337  av_log(NULL, AV_LOG_INFO, " (copy)");
3338  else {
3339  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3340  const AVCodec *out_codec = ost->enc;
3341  const char *decoder_name = "?";
3342  const char *in_codec_name = "?";
3343  const char *encoder_name = "?";
3344  const char *out_codec_name = "?";
3345  const AVCodecDescriptor *desc;
3346 
3347  if (in_codec) {
3348  decoder_name = in_codec->name;
3349  desc = avcodec_descriptor_get(in_codec->id);
3350  if (desc)
3351  in_codec_name = desc->name;
3352  if (!strcmp(decoder_name, in_codec_name))
3353  decoder_name = "native";
3354  }
3355 
3356  if (out_codec) {
3357  encoder_name = out_codec->name;
3358  desc = avcodec_descriptor_get(out_codec->id);
3359  if (desc)
3360  out_codec_name = desc->name;
3361  if (!strcmp(encoder_name, out_codec_name))
3362  encoder_name = "native";
3363  }
3364 
3365  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3366  in_codec_name, decoder_name,
3367  out_codec_name, encoder_name);
3368  }
3369  av_log(NULL, AV_LOG_INFO, "\n");
3370  }
3371 
3372  if (ret) {
3373  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3374  return ret;
3375  }
3376 
3377  if (sdp_filename || want_sdp) {
3378  print_sdp();
3379  }
3380 
3381  transcode_init_done = 1;
3382 
3383  return 0;
3384 }
3385 
3386 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3387 static int need_output(void)
3388 {
3389  int i;
3390 
3391  for (i = 0; i < nb_output_streams; i++) {
3392  OutputStream *ost = output_streams[i];
3393  OutputFile *of = output_files[ost->file_index];
3394  AVFormatContext *os = output_files[ost->file_index]->ctx;
3395 
3396  if (ost->finished ||
3397  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3398  continue;
3399  if (ost->frame_number >= ost->max_frames) {
3400  int j;
3401  for (j = 0; j < of->ctx->nb_streams; j++)
3402  close_output_stream(output_streams[of->ost_index + j]);
3403  continue;
3404  }
3405 
3406  return 1;
3407  }
3408 
3409  return 0;
3410 }
3411 
3412 /**
3413  * Select the output stream to process.
3414  *
3415  * @return selected output stream, or NULL if none available
3416  */
3417 static OutputStream *choose_output(void)
3418 {
3419  int i;
3420  int64_t opts_min = INT64_MAX;
3421  OutputStream *ost_min = NULL;
3422 
3423  for (i = 0; i < nb_output_streams; i++) {
3424  OutputStream *ost = output_streams[i];
3425  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3426  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3427  AV_TIME_BASE_Q);
3428  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3429  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3430 
3431  if (!ost->finished && opts < opts_min) {
3432  opts_min = opts;
3433  ost_min = ost->unavailable ? NULL : ost;
3434  }
3435  }
3436  return ost_min;
3437 }
3438 
3439 static void set_tty_echo(int on)
3440 {
3441 #if HAVE_TERMIOS_H
3442  struct termios tty;
3443  if (tcgetattr(0, &tty) == 0) {
3444  if (on) tty.c_lflag |= ECHO;
3445  else tty.c_lflag &= ~ECHO;
3446  tcsetattr(0, TCSANOW, &tty);
3447  }
3448 #endif
3449 }
3450 
3451 static int check_keyboard_interaction(int64_t cur_time)
3452 {
3453  int i, ret, key;
3454  static int64_t last_time;
3455  if (received_nb_signals)
3456  return AVERROR_EXIT;
3457  /* read_key() returns 0 on EOF */
3458  if(cur_time - last_time >= 100000 && !run_as_daemon){
3459  key = read_key();
3460  last_time = cur_time;
3461  }else
3462  key = -1;
3463  if (key == 'q')
3464  return AVERROR_EXIT;
3465  if (key == '+') av_log_set_level(av_log_get_level()+10);
3466  if (key == '-') av_log_set_level(av_log_get_level()-10);
3467  if (key == 's') qp_hist ^= 1;
3468  if (key == 'h'){
3469  if (do_hex_dump){
3470  do_hex_dump = do_pkt_dump = 0;
3471  } else if(do_pkt_dump){
3472  do_hex_dump = 1;
3473  } else
3474  do_pkt_dump = 1;
3475  av_log_set_level(AV_LOG_DEBUG);
3476  }
3477  if (key == 'c' || key == 'C'){
3478  char buf[4096], target[64], command[256], arg[256] = {0};
3479  double time;
3480  int k, n = 0;
3481  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3482  i = 0;
3483  set_tty_echo(1);
3484  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3485  if (k > 0)
3486  buf[i++] = k;
3487  buf[i] = 0;
3488  set_tty_echo(0);
3489  fprintf(stderr, "\n");
3490  if (k > 0 &&
3491  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3492  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3493  target, time, command, arg);
3494  for (i = 0; i < nb_filtergraphs; i++) {
3495  FilterGraph *fg = filtergraphs[i];
3496  if (fg->graph) {
3497  if (time < 0) {
3498  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3499  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3500  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3501  } else if (key == 'c') {
3502  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3503  ret = AVERROR_PATCHWELCOME;
3504  } else {
3505  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3506  if (ret < 0)
3507  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3508  }
3509  }
3510  }
3511  } else {
3512  av_log(NULL, AV_LOG_ERROR,
3513  "Parse error, at least 3 arguments were expected, "
3514  "only %d given in string '%s'\n", n, buf);
3515  }
3516  }
3517  if (key == 'd' || key == 'D'){
3518  int debug=0;
3519  if(key == 'D') {
3520  debug = input_streams[0]->st->codec->debug<<1;
3521  if(!debug) debug = 1;
3522  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3523  debug += debug;
3524  }else{
3525  char buf[32];
3526  int k = 0;
3527  i = 0;
3528  set_tty_echo(1);
3529  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3530  if (k > 0)
3531  buf[i++] = k;
3532  buf[i] = 0;
3533  set_tty_echo(0);
3534  fprintf(stderr, "\n");
3535  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3536  fprintf(stderr,"error parsing debug value\n");
3537  }
3538  for(i=0;i<nb_input_streams;i++) {
3539  input_streams[i]->st->codec->debug = debug;
3540  }
3541  for(i=0;i<nb_output_streams;i++) {
3542  OutputStream *ost = output_streams[i];
3543  ost->enc_ctx->debug = debug;
3544  }
3545  if(debug) av_log_set_level(AV_LOG_DEBUG);
3546  fprintf(stderr,"debug=%d\n", debug);
3547  }
3548  if (key == '?'){
3549  fprintf(stderr, "key function\n"
3550  "? show this help\n"
3551  "+ increase verbosity\n"
3552  "- decrease verbosity\n"
3553  "c Send command to first matching filter supporting it\n"
3554  "C Send/Queue command to all matching filters\n"
3555  "D cycle through available debug modes\n"
3556  "h dump packets/hex press to cycle through the 3 states\n"
3557  "q quit\n"
3558  "s Show QP histogram\n"
3559  );
3560  }
3561  return 0;
3562 }
3563 
3564 #if HAVE_PTHREADS
3565 static void *input_thread(void *arg)
3566 {
3567  InputFile *f = arg;
3568  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3569  int ret = 0;
3570 
3571  while (1) {
3572  AVPacket pkt;
3573  ret = av_read_frame(f->ctx, &pkt);
3574 
3575  if (ret == AVERROR(EAGAIN)) {
3576  av_usleep(10000);
3577  continue;
3578  }
3579  if (ret < 0) {
3580  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3581  break;
3582  }
3583  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3584  if (flags && ret == AVERROR(EAGAIN)) {
3585  flags = 0;
3586  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3587  av_log(f->ctx, AV_LOG_WARNING,
3588  "Thread message queue blocking; consider raising the "
3589  "thread_queue_size option (current value: %d)\n",
3590  f->thread_queue_size);
3591  }
3592  if (ret < 0) {
3593  if (ret != AVERROR_EOF)
3594  av_log(f->ctx, AV_LOG_ERROR,
3595  "Unable to send packet to main thread: %s\n",
3596  av_err2str(ret));
3597  av_packet_unref(&pkt);
3598  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3599  break;
3600  }
3601  }
3602 
3603  return NULL;
3604 }
3605 
3606 static void free_input_threads(void)
3607 {
3608  int i;
3609 
3610  for (i = 0; i < nb_input_files; i++) {
3611  InputFile *f = input_files[i];
3612  AVPacket pkt;
3613 
3614  if (!f || !f->in_thread_queue)
3615  continue;
3616  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3617  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3618  av_packet_unref(&pkt);
3619 
3620  pthread_join(f->thread, NULL);
3621  f->joined = 1;
3622  av_thread_message_queue_free(&f->in_thread_queue);
3623  }
3624 }
3625 
3626 static int init_input_threads(void)
3627 {
3628  int i, ret;
3629 
3630  if (nb_input_files == 1)
3631  return 0;
3632 
3633  for (i = 0; i < nb_input_files; i++) {
3634  InputFile *f = input_files[i];
3635 
3636  if (f->ctx->pb ? !f->ctx->pb->seekable :
3637  strcmp(f->ctx->iformat->name, "lavfi"))
3638  f->non_blocking = 1;
3639  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3640  f->thread_queue_size, sizeof(AVPacket));
3641  if (ret < 0)
3642  return ret;
3643 
3644  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3645  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3646  av_thread_message_queue_free(&f->in_thread_queue);
3647  return AVERROR(ret);
3648  }
3649  }
3650  return 0;
3651 }
3652 
3653 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3654 {
3655  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3656  f->non_blocking ?
3657  AV_THREAD_MESSAGE_NONBLOCK : 0);
3658 }
3659 #endif
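/*
 * Illustrative sketch (not part of the original ffmpeg.c): the demuxer thread
 * above is a plain producer/consumer built on AVThreadMessageQueue. Stripped
 * of the ffmpeg-specific context it reduces to roughly this, assuming the
 * queue elements are AVPacket as in init_input_threads().
 */
static int thread_queue_roundtrip_example(void)
{
    AVThreadMessageQueue *q = NULL;
    AVPacket in = { 0 }, out;
    int ret;

    /* fixed-capacity queue of AVPacket elements (cf. -thread_queue_size) */
    ret = av_thread_message_queue_alloc(&q, 8, sizeof(AVPacket));
    if (ret < 0)
        return ret;

    av_init_packet(&in);                               /* empty/null packet */
    ret = av_thread_message_queue_send(q, &in, 0);     /* producer side */
    if (ret >= 0 &&
        (ret = av_thread_message_queue_recv(q, &out, 0)) >= 0)  /* consumer */
        av_packet_unref(&out);

    /* make any further reader fail with EOF, then drop the queue */
    av_thread_message_queue_set_err_recv(q, AVERROR_EOF);
    av_thread_message_queue_free(&q);
    return ret;
}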
3660 
3661 static int get_input_packet(InputFile *f, AVPacket *pkt)
3662 {
3663  if (f->rate_emu) {
3664  int i;
3665  for (i = 0; i < f->nb_streams; i++) {
3666  InputStream *ist = input_streams[f->ist_index + i];
3667  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3668  int64_t now = av_gettime_relative() - ist->start;
3669  if (pts > now)
3670  return AVERROR(EAGAIN);
3671  }
3672  }
3673 
3674 #if HAVE_PTHREADS
3675  if (nb_input_files > 1)
3676  return get_input_packet_mt(f, pkt);
3677 #endif
3678  return av_read_frame(f->ctx, pkt);
3679 }
3680 
3681 static int got_eagain(void)
3682 {
3683  int i;
3684  for (i = 0; i < nb_output_streams; i++)
3685  if (output_streams[i]->unavailable)
3686  return 1;
3687  return 0;
3688 }
3689 
3690 static void reset_eagain(void)
3691 {
3692  int i;
3693  for (i = 0; i < nb_input_files; i++)
3694  input_files[i]->eagain = 0;
3695  for (i = 0; i < nb_output_streams; i++)
3696  output_streams[i]->unavailable = 0;
3697 }
3698 
3699 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3700 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3701  AVRational time_base)
3702 {
3703  int ret;
3704 
3705  if (!*duration) {
3706  *duration = tmp;
3707  return tmp_time_base;
3708  }
3709 
3710  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3711  if (ret < 0) {
3712  *duration = tmp;
3713  return tmp_time_base;
3714  }
3715 
3716  return time_base;
3717 }
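/*
 * Illustrative note (not part of the original ffmpeg.c): duration_max() keeps
 * whichever of the two durations is larger, comparing them in their own time
 * bases via av_compare_ts(). For example, 3 ticks in 1/25 (0.12 s) versus
 * 9000 ticks in 1/90000 (0.10 s):
 */
static void duration_max_example(void)
{
    int64_t duration = 9000;                       /* 0.10 s */
    AVRational tb = { 1, 90000 };
    /* av_compare_ts(9000, 1/90000, 3, 1/25) < 0, so the stored duration is
     * replaced by 3 and the returned time base becomes 1/25 */
    AVRational r = duration_max(3, &duration, (AVRational){ 1, 25 }, tb);
    (void)r;
}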
3718 
3719 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3720 {
3721  InputStream *ist;
3722  AVCodecContext *avctx;
3723  int i, ret, has_audio = 0;
3724  int64_t duration = 0;
3725 
3726  ret = av_seek_frame(is, -1, is->start_time, 0);
3727  if (ret < 0)
3728  return ret;
3729 
3730  for (i = 0; i < ifile->nb_streams; i++) {
3731  ist = input_streams[ifile->ist_index + i];
3732  avctx = ist->dec_ctx;
3733 
3734  // flush decoders
3735  if (ist->decoding_needed) {
3736  process_input_packet(ist, NULL, 1);
3737  avcodec_flush_buffers(avctx);
3738  }
3739 
3740  /* duration is the length of the last frame in a stream;
3741  * when an audio stream is present we don't care about the
3742  * last video frame's length because it is not exactly defined */
3743  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3744  has_audio = 1;
3745  }
3746 
3747  for (i = 0; i < ifile->nb_streams; i++) {
3748  ist = input_streams[ifile->ist_index + i];
3749  avctx = ist->dec_ctx;
3750 
3751  if (has_audio) {
3752  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3753  AVRational sample_rate = {1, avctx->sample_rate};
3754 
3755  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3756  } else
3757  continue;
3758  } else {
3759  if (ist->framerate.num) {
3760  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3761  } else if (ist->st->avg_frame_rate.num) {
3762  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3763  } else duration = 1;
3764  }
3765  if (!ifile->duration)
3766  ifile->time_base = ist->st->time_base;
3767  /* max_pts - min_pts is the duration of the stream without the last
3768  * frame; adding the last frame's duration gives the total duration */
3769  duration += ist->max_pts - ist->min_pts;
3770  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3771  ifile->time_base);
3772  }
3773 
3774  if (ifile->loop > 0)
3775  ifile->loop--;
3776 
3777  return ret;
3778 }
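/*
 * Illustrative note (not part of the original ffmpeg.c): the per-stream
 * duration computed above is just the last frame's length rescaled into the
 * stream time base. For instance, an audio frame of 1024 samples at 48 kHz
 * expressed in a 1/90000 time base:
 */
static int64_t last_frame_duration_example(void)
{
    /* av_rescale_q(1024, 1/48000, 1/90000) = 1024 * 90000 / 48000 = 1920 */
    return av_rescale_q(1024, (AVRational){ 1, 48000 }, (AVRational){ 1, 90000 });
}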
3779 
3780 /*
3781  * Return
3782  * - 0 -- one packet was read and processed
3783  * - AVERROR(EAGAIN) -- no packets were available for the selected file,
3784  * this function should be called again
3785  * - AVERROR_EOF -- this function should not be called again
3786  */
3787 static int process_input(int file_index)
3788 {
3789  InputFile *ifile = input_files[file_index];
3790  AVFormatContext *is;
3791  InputStream *ist;
3792  AVPacket pkt;
3793  int ret, i, j;
3794  int64_t duration;
3795  int64_t pkt_dts;
3796 
3797  is = ifile->ctx;
3798  ret = get_input_packet(ifile, &pkt);
3799 
3800  if (ret == AVERROR(EAGAIN)) {
3801  ifile->eagain = 1;
3802  return ret;
3803  }
3804  if (ret < 0 && ifile->loop) {
3805  if ((ret = seek_to_start(ifile, is)) < 0)
3806  return ret;
3807  ret = get_input_packet(ifile, &pkt);
3808  }
3809  if (ret < 0) {
3810  if (ret != AVERROR_EOF) {
3811  print_error(is->filename, ret);
3812  if (exit_on_error)
3813  exit_program(1);
3814  }
3815 
3816  for (i = 0; i < ifile->nb_streams; i++) {
3817  ist = input_streams[ifile->ist_index + i];
3818  if (ist->decoding_needed) {
3819  ret = process_input_packet(ist, NULL, 0);
3820  if (ret>0)
3821  return 0;
3822  }
3823 
3824  /* mark all outputs that don't go through lavfi as finished */
3825  for (j = 0; j < nb_output_streams; j++) {
3826  OutputStream *ost = output_streams[j];
3827 
3828  if (ost->source_index == ifile->ist_index + i &&
3829  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3830  finish_output_stream(ost);
3831  }
3832  }
3833 
3834  ifile->eof_reached = 1;
3835  return AVERROR(EAGAIN);
3836  }
3837 
3838  reset_eagain();
3839 
3840  if (do_pkt_dump) {
3841  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
3842  is->streams[pkt.stream_index]);
3843  }
3844  /* the following test is needed in case new streams appear
3845  dynamically in the stream; we ignore them */
3846  if (pkt.stream_index >= ifile->nb_streams) {
3847  report_new_stream(file_index, &pkt);
3848  goto discard_packet;
3849  }
3850 
3851  ist = input_streams[ifile->ist_index + pkt.stream_index];
3852 
3853  ist->data_size += pkt.size;
3854  ist->nb_packets++;
3855 
3856  if (ist->discard)
3857  goto discard_packet;
3858 
3859  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
3860  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
3861  exit_program(1);
3862  }
3863 
3864  if (debug_ts) {
3865  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3866  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3870  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3871  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3872  av_ts2str(input_files[ist->file_index]->ts_offset),
3873  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3874  }
3875 
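    /* Timestamp wrap handling: when the container stores timestamps with
     * fewer than 64 bits (ist->st->pts_wrap_bits), values near the wrap point
     * can show up as huge positive numbers. The block below first tightens
     * ts_offset to the earliest start time of the enabled streams, then
     * subtracts one full wrap period from any pkt.dts/pkt.pts that lies more
     * than half a period above the start time. */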
3876  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3877  int64_t stime, stime2;
3878  // Correcting starttime based on the enabled streams
3879  // FIXME: this should ideally be done before the first use of starttime, but we do not know which streams are enabled at that point,
3880  // so we instead do it here as part of discontinuity handling
3881  if ( ist->next_dts == AV_NOPTS_VALUE
3882  && ifile->ts_offset == -is->start_time
3883  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3884  int64_t new_start_time = INT64_MAX;
3885  for (i=0; i<is->nb_streams; i++) {
3886  AVStream *st = is->streams[i];
3887  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3888  continue;
3889  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3890  }
3891  if (new_start_time > is->start_time) {
3892  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3893  ifile->ts_offset = -new_start_time;
3894  }
3895  }
3896 
3897  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3898  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3899  ist->wrap_correction_done = 1;
3900 
3901  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3902  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3903  ist->wrap_correction_done = 0;
3904  }
3905  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3906  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3907  ist->wrap_correction_done = 0;
3908  }
3909  }
3910 
3911  /* add the stream-global side data to the first packet */
3912  if (ist->nb_packets == 1) {
3913  if (ist->st->nb_side_data)
3914  av_packet_split_side_data(&pkt);
3915  for (i = 0; i < ist->st->nb_side_data; i++) {
3916  AVPacketSideData *src_sd = &ist->st->side_data[i];
3917  uint8_t *dst_data;
3918 
3919  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3920  continue;
3921  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3922  continue;
3923 
3924  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3925  if (!dst_data)
3926  exit_program(1);
3927 
3928  memcpy(dst_data, src_sd->data, src_sd->size);
3929  }
3930  }
3931 
3932  if (pkt.dts != AV_NOPTS_VALUE)
3933  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3934  if (pkt.pts != AV_NOPTS_VALUE)
3935  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3936 
3937  if (pkt.pts != AV_NOPTS_VALUE)
3938  pkt.pts *= ist->ts_scale;
3939  if (pkt.dts != AV_NOPTS_VALUE)
3940  pkt.dts *= ist->ts_scale;
3941 
3942  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3943  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3944  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3945  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3946  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3947  int64_t delta = pkt_dts - ifile->last_ts;
3948  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3949  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3950  ifile->ts_offset -= delta;
3951  av_log(NULL, AV_LOG_WARNING,
3952  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3953  delta, ifile->ts_offset);
3954  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3955  if (pkt.pts != AV_NOPTS_VALUE)
3956  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3957  }
3958  }
3959 
3960  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
3961  if (pkt.pts != AV_NOPTS_VALUE) {
3962  pkt.pts += duration;
3963  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
3964  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
3965  }
3966 
3967  if (pkt.dts != AV_NOPTS_VALUE)
3968  pkt.dts += duration;
3969 
3970  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
3971  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3972  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3973  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3974  !copy_ts) {
3975  int64_t delta = pkt_dts - ist->next_dts;
3976  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3977  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3978  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3979  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3980  ifile->ts_offset -= delta;
3981  av_log(NULL, AV_LOG_WARNING,
3982  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3983  delta, ifile->ts_offset);
3984  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3985  if (pkt.pts != AV_NOPTS_VALUE)
3986  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3987  }
3988  } else {
3989  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3990  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3991  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3992  pkt.dts = AV_NOPTS_VALUE;
3993  }
3994  if (pkt.pts != AV_NOPTS_VALUE){
3995  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3996  delta = pkt_pts - ist->next_dts;
3997  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3998  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3999  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4000  pkt.pts = AV_NOPTS_VALUE;
4001  }
4002  }
4003  }
4004  }
4005 
4006  if (pkt.dts != AV_NOPTS_VALUE)
4007  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4008 
4009  if (debug_ts) {
4010  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4012  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4013  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4014  av_ts2str(input_files[ist->file_index]->ts_offset),
4015  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4016  }
4017 
4018  sub2video_heartbeat(ist, pkt.pts);
4019 
4020  process_input_packet(ist, &pkt, 0);
4021 
4022 discard_packet:
4023  av_packet_unref(&pkt);
4024 
4025  return 0;
4026 }
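/*
 * Illustrative note (not part of the original ffmpeg.c): process_input()
 * applies the per-file ts_offset (kept in AV_TIME_BASE units, i.e.
 * microseconds) to each packet after rescaling it into the stream time base:
 */
static int64_t ts_offset_example(void)
{
    /* a 1 second input offset in a 90 kHz stream becomes 90000 ticks:
     * av_rescale_q(1000000, 1/1000000, 1/90000) = 90000 */
    return av_rescale_q(1 * AV_TIME_BASE, AV_TIME_BASE_Q, (AVRational){ 1, 90000 });
}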
4027 
4028 /**
4029  * Perform a step of transcoding for the specified filter graph.
4030  *
4031  * @param[in] graph filter graph to consider
4032  * @param[out] best_ist input stream from which a frame would allow transcoding to continue
4033  * @return 0 for success, <0 for error
4034  */
4035 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4036 {
4037  int i, ret;
4038  int nb_requests, nb_requests_max = 0;
4039  InputFilter *ifilter;
4040  InputStream *ist;
4041 
4042  *best_ist = NULL;
4043  ret = avfilter_graph_request_oldest(graph->graph);
4044  if (ret >= 0)
4045  return reap_filters(0);
4046 
4047  if (ret == AVERROR_EOF) {
4048  ret = reap_filters(1);
4049  for (i = 0; i < graph->nb_outputs; i++)
4050  close_output_stream(graph->outputs[i]->ost);
4051  return ret;
4052  }
4053  if (ret != AVERROR(EAGAIN))
4054  return ret;
4055 
4056  for (i = 0; i < graph->nb_inputs; i++) {
4057  ifilter = graph->inputs[i];
4058  ist = ifilter->ist;
4059  if (input_files[ist->file_index]->eagain ||
4060  input_files[ist->file_index]->eof_reached)
4061  continue;
4062  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4063  if (nb_requests > nb_requests_max) {
4064  nb_requests_max = nb_requests;
4065  *best_ist = ist;
4066  }
4067  }
4068 
4069  if (!*best_ist)
4070  for (i = 0; i < graph->nb_outputs; i++)
4071  graph->outputs[i]->ost->unavailable = 1;
4072 
4073  return 0;
4074 }
4075 
4076 /**
4077  * Run a single step of transcoding.
4078  *
4079  * @return 0 for success, <0 for error
4080  */
4081 static int transcode_step(void)
4082 {
4083  OutputStream *ost;
4084  InputStream *ist;
4085  int ret;
4086 
4087  ost = choose_output();
4088  if (!ost) {
4089  if (got_eagain()) {
4090  reset_eagain();
4091  av_usleep(10000);
4092  return 0;
4093  }
4094  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4095  return AVERROR_EOF;
4096  }
4097 
4098  if (ost->filter) {
4099  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4100  return ret;
4101  if (!ist)
4102  return 0;
4103  } else {
4104  av_assert0(ost->source_index >= 0);
4105  ist = input_streams[ost->source_index];
4106  }
4107 
4108  ret = process_input(ist->file_index);
4109  if (ret == AVERROR(EAGAIN)) {
4110  if (input_files[ist->file_index]->eagain)
4111  ost->unavailable = 1;
4112  return 0;
4113  }
4114 
4115  if (ret < 0)
4116  return ret == AVERROR_EOF ? 0 : ret;
4117 
4118  return reap_filters(0);
4119 }
4120 
4121 /*
4122  * The following code is the main loop of the file converter
4123  */
4124 static int transcode(void)
4125 {
4126  int ret, i;
4127  AVFormatContext *os;
4128  OutputStream *ost;
4129  InputStream *ist;
4130  int64_t timer_start;
4131  int64_t total_packets_written = 0;
4132 
4133  ret = transcode_init();
4134  if (ret < 0)
4135  goto fail;
4136 
4137  if (stdin_interaction) {
4138  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4139  }
4140 
4141  timer_start = av_gettime_relative();
4142 
4143 #if HAVE_PTHREADS
4144  if ((ret = init_input_threads()) < 0)
4145  goto fail;
4146 #endif
4147 
4148  while (!received_sigterm) {
4149  int64_t cur_time= av_gettime_relative();
4150 
4151  /* if 'q' was pressed, exit */
4152  if (stdin_interaction)
4153  if (check_keyboard_interaction(cur_time) < 0)
4154  break;
4155 
4156  /* check if there's any stream where output is still needed */
4157  if (!need_output()) {
4158  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4159  break;
4160  }
4161 
4162  ret = transcode_step();
4163  if (ret < 0 && ret != AVERROR_EOF) {
4164  char errbuf[128];
4165  av_strerror(ret, errbuf, sizeof(errbuf));
4166 
4167  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4168  break;
4169  }
4170 
4171  /* dump report by using the output first video and audio streams */
4172  print_report(0, timer_start, cur_time);
4173  }
4174 #if HAVE_PTHREADS
4175  free_input_threads();
4176 #endif
4177 
4178  /* at the end of stream, we must flush the decoder buffers */
4179  for (i = 0; i < nb_input_streams; i++) {
4180  ist = input_streams[i];
4181  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4182  process_input_packet(ist, NULL, 0);
4183  }
4184  }
4185  flush_encoders();
4186 
4187  term_exit();
4188 
4189  /* write the trailer if needed and close file */
4190  for (i = 0; i < nb_output_files; i++) {
4191  os = output_files[i]->ctx;
4192  if ((ret = av_write_trailer(os)) < 0) {
4193  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4194  if (exit_on_error)
4195  exit_program(1);
4196  }
4197  }
4198 
4199  /* dump report by using the first video and audio streams */
4200  print_report(1, timer_start, av_gettime_relative());
4201 
4202  /* close each encoder */
4203  for (i = 0; i < nb_output_streams; i++) {
4204  ost = output_streams[i];
4205  if (ost->encoding_needed) {
4206  av_freep(&ost->enc_ctx->stats_in);
4207  }
4208  total_packets_written += ost->packets_written;
4209  }
4210 
4211  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4212  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4213  exit_program(1);
4214  }
4215 
4216  /* close each decoder */
4217  for (i = 0; i < nb_input_streams; i++) {
4218  ist = input_streams[i];
4219  if (ist->decoding_needed) {
4220  avcodec_close(ist->dec_ctx);
4221  if (ist->hwaccel_uninit)
4222  ist->hwaccel_uninit(ist->dec_ctx);
4223  }
4224  }
4225 
4227 
4228  /* finished ! */
4229  ret = 0;
4230 
4231  fail:
4232 #if HAVE_PTHREADS
4233  free_input_threads();
4234 #endif
4235 
4236  if (output_streams) {
4237  for (i = 0; i < nb_output_streams; i++) {
4238  ost = output_streams[i];
4239  if (ost) {
4240  if (ost->logfile) {
4241  if (fclose(ost->logfile))
4242  av_log(NULL, AV_LOG_ERROR,
4243  "Error closing logfile, loss of information possible: %s\n",
4244  av_err2str(AVERROR(errno)));
4245  ost->logfile = NULL;
4246  }
4247  av_freep(&ost->forced_kf_pts);
4248  av_freep(&ost->apad);
4249  av_freep(&ost->disposition);
4250  av_dict_free(&ost->encoder_opts);
4251  av_dict_free(&ost->sws_dict);
4252  av_dict_free(&ost->swr_opts);
4253  av_dict_free(&ost->resample_opts);
4254  }
4255  }
4256  }
4257  return ret;
4258 }
4259 
4260 
4261 static int64_t getutime(void)
4262 {
4263 #if HAVE_GETRUSAGE
4264  struct rusage rusage;
4265 
4266  getrusage(RUSAGE_SELF, &rusage);
4267  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4268 #elif HAVE_GETPROCESSTIMES
4269  HANDLE proc;
4270  FILETIME c, e, k, u;
4271  proc = GetCurrentProcess();
4272  GetProcessTimes(proc, &c, &e, &k, &u);
4273  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4274 #else
4275  return av_gettime_relative();
4276 #endif
4277 }
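/* Note on getutime(): the rusage and GetProcessTimes branches both return
 * user CPU time in microseconds. struct rusage reports seconds plus
 * microseconds directly, while the Windows FILETIME value counts
 * 100-nanosecond intervals, hence the division by 10. The fallback branch
 * returns wall-clock time instead. */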
4278 
4279 static int64_t getmaxrss(void)
4280 {
4281 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4282  struct rusage rusage;
4283  getrusage(RUSAGE_SELF, &rusage);
4284  return (int64_t)rusage.ru_maxrss * 1024;
4285 #elif HAVE_GETPROCESSMEMORYINFO
4286  HANDLE proc;
4287  PROCESS_MEMORY_COUNTERS memcounters;
4288  proc = GetCurrentProcess();
4289  memcounters.cb = sizeof(memcounters);
4290  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4291  return memcounters.PeakPagefileUsage;
4292 #else
4293  return 0;
4294 #endif
4295 }
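/* Note on getmaxrss(): ru_maxrss is reported in kilobytes on most systems,
 * hence the multiplication by 1024 to get bytes; the Windows branch returns
 * PeakPagefileUsage, which is already a byte count of peak committed memory. */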
4296 
4297 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4298 {
4299 }
4300 
4301 int main(int argc, char **argv)
4302 {
4303  int ret;
4304  int64_t ti;
4305 
4306  register_exit(ffmpeg_cleanup);
4307 
4308  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4309 
4310  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4311  parse_loglevel(argc, argv, options);
4312 
4313  if(argc>1 && !strcmp(argv[1], "-d")){
4314  run_as_daemon=1;
4315  av_log_set_callback(log_callback_null);
4316  argc--;
4317  argv++;
4318  }
4319 
4320  avcodec_register_all();
4321 #if CONFIG_AVDEVICE
4322  avdevice_register_all();
4323 #endif
4324  avfilter_register_all();
4325  av_register_all();
4326  avformat_network_init();
4327 
4328  show_banner(argc, argv, options);
4329 
4330  term_init();
4331 
4332  /* parse options and open all input/output files */
4333  ret = ffmpeg_parse_options(argc, argv);
4334  if (ret < 0)
4335  exit_program(1);
4336 
4337  if (nb_output_files <= 0 && nb_input_files == 0) {
4338  show_usage();
4339  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4340  exit_program(1);
4341  }
4342 
4343  /* file converter / grab */
4344  if (nb_output_files <= 0) {
4345  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4346  exit_program(1);
4347  }
4348 
4349 // if (nb_input_files == 0) {
4350 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4351 // exit_program(1);
4352 // }
4353 
4354  current_time = ti = getutime();
4355  if (transcode() < 0)
4356  exit_program(1);
4357  ti = getutime() - ti;
4358  if (do_benchmark) {
4359  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4360  }
4361  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4362  decode_error_stat[0], decode_error_stat[1]);
4363  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4364  exit_program(69);
4365 
4366  exit_program(received_nb_signals ? 255 : main_return_code);
4367  return main_return_code;
4368 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1528
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:279
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:892
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2911
int got_output
Definition: ffmpeg.h:308
#define AV_DISPOSITION_METADATA
Definition: avformat.h:860
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1798
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1058
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1911
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:409
const struct AVCodec * codec
Definition: avcodec.h:1658
Definition: ffmpeg.h:386
AVRational framerate
Definition: avcodec.h:3338
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:335
const char * s
Definition: avisynth_c.h:631
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:499
void term_init(void)
Definition: ffmpeg.c:366
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:238
int nb_outputs
Definition: ffmpeg.h:255
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:461
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:265
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2222
int resample_channels
Definition: ffmpeg.h:303
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:308
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:3044
int stream_copy
Definition: ffmpeg.h:466
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1083
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3864
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1577
AVOption.
Definition: opt.h:245
AVRational frame_rate
Definition: ffmpeg.h:431
int64_t * forced_kf_pts
Definition: ffmpeg.h:440
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:288
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2987
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:456
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:871
#define CODEC_FLAG_PASS2
Definition: avcodec.h:1077
static int process_input(int file_index)
Definition: ffmpeg.c:3787
int exit_on_error
Definition: ffmpeg_opt.c:115
const char * fmt
Definition: avisynth_c.h:632
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2597
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1714
#define LIBAVUTIL_VERSION_INT
Definition: version.h:70
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1600
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:127
Memory buffer source API.
const char * desc
Definition: nvenc.c:89
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:292
AVCodecParserContext * parser
Definition: ffmpeg.h:474
static int64_t cur_time
Definition: ffserver.c:262
int64_t max_pts
Definition: ffmpeg.h:285
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2385
int decoding_needed
Definition: ffmpeg.h:263
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:943
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1507
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:435
int index
stream index in AVFormatContext
Definition: avformat.h:877
int size
Definition: avcodec.h:1581
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4279
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:128
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2585
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:607
#define AV_DISPOSITION_DUB
Definition: avformat.h:830
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2060
int eagain
Definition: ffmpeg.h:357
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
attribute_deprecated int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1776
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1133
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1877
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:419
attribute_deprecated int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2281
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:604
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:842
int quality
Definition: ffmpeg.h:486
unsigned num_rects
Definition: avcodec.h:3902
AVFrame * filter_frame
Definition: ffmpeg.h:270
static int transcode_init(void)
Definition: ffmpeg.c:2813
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2592
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2283
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2825
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:108
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:844
int last_dropped
Definition: ffmpeg.h:425
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:658
discard all
Definition: avcodec.h:784
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:983
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:338
int64_t input_ts_offset
Definition: ffmpeg.h:363
int do_hex_dump
Definition: ffmpeg_opt.c:109
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3049
int nb_input_streams
Definition: ffmpeg.c:138
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:61
const char * name
Definition: ffmpeg.h:73
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2589
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2757
Picture data structure.
Definition: avcodec.h:3831
int profile
profile
Definition: avcodec.h:3153
uint64_t packets_written
Definition: ffmpeg.h:480
AVCodec.
Definition: avcodec.h:3542
#define VSYNC_VFR
Definition: ffmpeg.h:54
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2447
#define FF_INPUT_BUFFER_PADDING_SIZE
Definition: avcodec.h:744
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:494
int print_stats
Definition: ffmpeg_opt.c:117
float dts_error_threshold
Definition: ffmpeg_opt.c:100
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:500
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
uint64_t data_size
Definition: ffmpeg.h:478
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:452
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:834
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1786
Undefined.
Definition: avutil.h:265
AVSubtitleRect ** rects
Definition: avcodec.h:3903
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2475
int encoding_needed
Definition: ffmpeg.h:408
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:609
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4297
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3451
Format I/O context.
Definition: avformat.h:1325
uint64_t samples_decoded
Definition: ffmpeg.h:351
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:229
unsigned int nb_stream_indexes
Definition: avformat.h:1257
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:75
int64_t cur_dts
Definition: avformat.h:1059
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3866
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:981
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:313
uint64_t frames_decoded
Definition: ffmpeg.h:350
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:249
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:928
char * logfile_prefix
Definition: ffmpeg.h:451
static uint8_t * subtitle_out
Definition: ffmpeg.c:135
static int main_return_code
Definition: ffmpeg.c:318
static int64_t start_time
Definition: ffplay.c:330
int copy_initial_nonkeyframes
Definition: ffmpeg.h:468
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:112
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:541
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:140
Opaque data information usually continuous.
Definition: avutil.h:195
AVDictionary * sws_dict
Definition: ffmpeg.h:460
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:200
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:212
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3275
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:674
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int stdin_interaction
Definition: ffmpeg_opt.c:119
FILE * logfile
Definition: ffmpeg.h:452
AVDictionary * opts
Definition: ffmpeg.h:497
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:565
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1598
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1404
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2479
static int need_output(void)
Definition: ffmpeg.c:3387
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:374
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:987
static double psnr(double d)
Definition: ffmpeg.c:1233
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1764
int do_benchmark
Definition: ffmpeg_opt.c:107
int audio_sync_method
Definition: ffmpeg_opt.c:103
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
int shortest
Definition: ffmpeg.h:503
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1393
int64_t duration
Definition: movenc.c:63
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4261
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:112
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:370
uint8_t * data
Definition: avcodec.h:1580
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
static void set_tty_echo(int on)
Definition: ffmpeg.c:3439
AVDictionary * resample_opts
Definition: ffmpeg.h:462
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2694
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:235
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:3719
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4537
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:140
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:401
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1295
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:317
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:827
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1238
int resample_sample_rate
Definition: ffmpeg.h:302
uint8_t * data
Definition: avcodec.h:1524
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:318
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:488
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:306
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3867
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:511
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:3042
AVCodec * dec
Definition: ffmpeg.h:268
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1255
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2817
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:204
int top_field_first
Definition: ffmpeg.h:293
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1453
int nb_output_streams
Definition: ffmpeg.c:143
int file_index
Definition: ffmpeg.h:259
const OptionDef options[]
Definition: ffserver.c:3969
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5656
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2392
unsigned int * stream_index
Definition: avformat.h:1256
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:299
int resample_height
Definition: ffmpeg.h:297
int wrap_correction_done
Definition: ffmpeg.h:280
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:282
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:259
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:858
unsigned m
Definition: audioconvert.c:187
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1344
int64_t next_dts
Definition: ffmpeg.h:275
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1612
attribute_deprecated int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Definition: avpicture.c:37
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:511
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2959
static volatile int transcode_init_done
Definition: ffmpeg.c:316
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3556
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3603
int rate_emu
Definition: ffmpeg.h:373
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1971
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1276
static void reset_eagain(void)
Definition: ffmpeg.c:3690
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:635
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:340
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1631
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:625
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3049
FilterGraph ** filtergraphs
Definition: ffmpeg.c:147
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:457
AVFilterContext * filter
Definition: ffmpeg.h:228
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:337
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:486
int64_t start
Definition: ffmpeg.h:272
int loop
Definition: ffmpeg.h:359
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3865
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:348
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:418
int video_sync_method
Definition: ffmpeg_opt.c:104
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:130
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:178
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:153
char * sdp_filename
Definition: ffmpeg_opt.c:96
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:426
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2212
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: avcodec.h:3561
int initial_padding
Audio only.
Definition: avcodec.h:3329
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:130
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
unsigned int nb_programs
Definition: avformat.h:1478
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:202
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:515
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1744
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:539
int av_frame_get_decode_error_flags(const AVFrame *frame)
int cuvid_transcode_init(OutputStream *ost)
Definition: ffmpeg_cuvid.c:70
AVChapter ** chapters
Definition: avformat.h:1529
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:75
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1526
int av_log_get_level(void)
Get the current log level.
Definition: log.c:386
const char * name
Name of the codec implementation.
Definition: avcodec.h:3549
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:782
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:74
int side_data_elems
Definition: avcodec.h:1592
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:586
int force_fps
Definition: ffmpeg.h:433
int qsv_transcode_init(OutputStream *ost)
Definition: ffmpeg_qsv.c:183
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:954
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1252
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
int qp_hist
Definition: ffmpeg_opt.c:118
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:81
float frame_drop_threshold
Definition: ffmpeg_opt.c:105
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1035
int64_t error[4]
Definition: ffmpeg.h:492
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1586
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2956
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2461
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:147
int extradata_size
Size of the extradata content in bytes.
Definition: avcodec.h:3940
uint32_t end_display_time
Definition: avcodec.h:3901
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3904
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2625
OutputFilter * filter
Definition: ffmpeg.h:454
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:1931
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:349
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:437
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:833
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1566
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:841
common internal API header
Immediately push the frame to the output.
Definition: buffersrc.h:47
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1381
AVDictionary * opts
Definition: movenc.c:50
static int nb_frames_drop
Definition: ffmpeg.c:129
A bitmap, pict will be set.
Definition: avcodec.h:3846
int linesize[4]
Definition: avcodec.h:3882
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
int nb_output_files
Definition: ffmpeg.c:145
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:243
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:224
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:432
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
static int transcode(void)
Definition: ffmpeg.c:4124
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:883
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:404
char filename[1024]
input or output filename
Definition: avformat.h:1401
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:246
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:496
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3481
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:190
int saw_first_ts
Definition: ffmpeg.h:289
int abort_on_flags
Definition: ffmpeg_opt.c:116
This side data contains quality related information from the encoder.
Definition: avcodec.h:1428
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1947
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:236
int width
picture width / height.
Definition: avcodec.h:1836
PVOID HANDLE
char * apad
Definition: ffmpeg.h:463
AVBufferRef * hw_frames_ctx
Encoding only.
Definition: avcodec.h:3495
attribute_deprecated void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
int64_t nb_samples
Definition: ffmpeg.h:286
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:445
int64_t duration
Definition: ffmpeg.h:360
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:396
const char * name
Definition: avformat.h:522
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:235
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:862
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:846
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2364
int nb_filtergraphs
Definition: ffmpeg.c:148
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:88
int64_t last_ts
Definition: ffmpeg.h:366
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute value.
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:294
int do_pkt_dump
Definition: ffmpeg_opt.c:110
int64_t max_frames
Definition: ffmpeg.h:422
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:339
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:300
int audio_channels_mapped
Definition: ffmpeg.h:449
int n
Definition: avisynth_c.h:547
AVDictionary * metadata
Definition: avformat.h:945
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1795
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored automatically.
Definition: avfilter.h:593
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:192
Opaque data information usually sparse.
Definition: avutil.h:197
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define src
Definition: vp9dsp.c:530
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3881
static int got_eagain(void)
Definition: ffmpeg.c:3681
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:194
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:225
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:142
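The transcoder uses this to pick the closest entry in an encoder's supported_framerates list; a sketch under the assumption that 'supported' is a {0,0}-terminated AVRational array:

#include <libavutil/rational.h>

static AVRational snap_frame_rate(AVRational requested, const AVRational *supported)
{
    int idx = av_find_nearest_q_idx(requested, supported);  /* list must end with {0,0} */
    return supported[idx];
}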
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:3082
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:117
AVCodecContext * enc
Definition: muxing.c:55
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:411
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:843
int ret
Definition: ffmpeg.h:309
int audio_volume
Definition: ffmpeg_opt.c:102
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
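A small sketch of describing a channel layout for log output; the buffer size is an arbitrary but comfortable choice:

#include <libavutil/channel_layout.h>
#include <libavutil/log.h>

static void log_channel_layout(int nb_channels, uint64_t layout)
{
    char buf[128];
    av_get_channel_layout_string(buf, sizeof(buf), nb_channels, layout);
    av_log(NULL, AV_LOG_INFO, "channel layout: %s\n", buf);
}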
Stream structure.
Definition: avformat.h:876
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3349
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:484
int format
format of the frame, -1 if unknown or unset. Values correspond to enum AVPixelFormat for video frames, enum AVSampleFormat for audio.
Definition: frame.h:248
InputFilter ** filters
Definition: ffmpeg.h:324
int fix_sub_duration
Definition: ffmpeg.h:306
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:369
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4550
Definition: ffmpeg.h:72
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2430
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:74
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:829
AVStream * st
Definition: ffmpeg.h:260
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:166
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2772
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int frame_size
Definition: mxfenc.c:1821
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:51
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:859
int ost_index
Definition: ffmpeg.h:498
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:723
enum AVMediaType codec_type
Definition: avcodec.h:1657
double ts_scale
Definition: ffmpeg.h:288
int unavailable
Definition: ffmpeg.h:465
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
attribute_deprecated int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2180
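This is the old decode call (marked deprecated above; later code moves to avcodec_send_packet()/avcodec_receive_frame()). A minimal wrapper in the same spirit as decode_video() here, with error handling reduced:

#include <libavcodec/avcodec.h>

/* Returns <0 on error, 0 if no frame was produced, 1 if 'frame' holds a picture. */
static int decode_one_packet(AVCodecContext *dec, AVFrame *frame, const AVPacket *pkt)
{
    int got_frame = 0;
    int ret = avcodec_decode_video2(dec, frame, &got_frame, pkt);
    if (ret < 0)
        return ret;
    return got_frame ? 1 : 0;
}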
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2536
enum AVCodecID codec_id
Definition: avcodec.h:1666
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:318
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:252
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1561
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:181
float max_error_rate
Definition: ffmpeg_opt.c:121
int sample_rate
samples per second
Definition: avcodec.h:2410
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
uint64_t frames_encoded
Definition: ffmpeg.h:482
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2072
AVIOContext * pb
I/O context.
Definition: avformat.h:1367
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int ist_index
Definition: ffmpeg.h:358
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:547
static int loop
Definition: ffplay.c:339
int debug
debug
Definition: avcodec.h:2888
static void print_sdp(void)
Definition: ffmpeg.c:2440
const char * graph_desc
Definition: ffmpeg.h:247
int guess_layout_max
Definition: ffmpeg.h:294
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
int64_t start_time
Definition: ffmpeg.h:367
Keep a reference to the frame.
Definition: buffersrc.h:54
main external API structure.
Definition: avcodec.h:1649
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:563
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:318
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:459
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:767
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2694
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:263
const char * attachment_filename
Definition: ffmpeg.h:467
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1681
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1781
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
AVRational time_base
Definition: ffmpeg.h:362
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:595
AVCodecContext * enc_ctx
Definition: ffmpeg.h:420
void * buf
Definition: avisynth_c.h:553
AVFrame * decoded_frame
Definition: ffmpeg.h:269
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1765
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
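A sketch of the typical options-dictionary round trip: build a dictionary, hand it to an opener such as avcodec_open2(), then inspect what was left unconsumed. The option names below are illustrative.

#include <libavutil/dict.h>

static AVDictionary *build_opts(void)
{
    AVDictionary *opts = NULL;                      /* av_dict_set() allocates on demand */
    av_dict_set(&opts, "threads", "auto", 0);
    av_dict_set(&opts, "refcounted_frames", "1", 0);
    return opts;  /* caller: pass &opts to avcodec_open2(), check leftovers, then av_dict_free(&opts) */
}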
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4035
int nb_coded_side_data
Definition: avcodec.h:3482
int * audio_channels_map
Definition: ffmpeg.h:448
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:344
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1927
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:142
int index
Definition: gxfenc.c:89
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1006
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2378
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2900
rational number numerator/denominator
Definition: rational.h:43
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2371
attribute_deprecated int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:1922
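The old encode call (deprecated above) expects a caller-initialized packet whose payload the encoder allocates; a reduced sketch of how the encode/flush paths drive it:

#include <libavcodec/avcodec.h>

static int encode_one_frame(AVCodecContext *enc, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    av_init_packet(pkt);
    pkt->data = NULL;     /* let the encoder allocate the payload */
    pkt->size = 0;
    return avcodec_encode_video2(enc, pkt, frame, got_packet);
    /* on success with *got_packet set, the caller rescales timestamps
     * and eventually calls av_packet_unref(pkt) */
}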
int file_index
Definition: ffmpeg.h:404
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:394
static int current_time
Definition: ffmpeg.c:132
int64_t sync_opts
Definition: ffmpeg.h:413
char * vstats_filename
Definition: ffmpeg_opt.c:95
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:146
AVCodecContext * dec_ctx
Definition: ffmpeg.h:267
char * disposition
Definition: ffmpeg.h:470
struct InputStream::@24 prev_sub
#define mid_pred
Definition: mathops.h:96
AVMediaType
Definition: avutil.h:191
discard useless packets like 0 size packets in avi
Definition: avcodec.h:779
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1051
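A minimal open/write/close round trip on an AVIOContext; the URL and payload are placeholders:

#include <libavformat/avio.h>

static int write_text(const char *url, const char *text)
{
    AVIOContext *pb = NULL;
    int ret = avio_open2(&pb, url, AVIO_FLAG_WRITE, NULL, NULL);
    if (ret < 0)
        return ret;
    avio_printf(pb, "%s", text);
    avio_flush(pb);           /* force out buffered data before closing */
    return avio_closep(&pb);  /* closes the resource and NULLs the pointer */
}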
int nb_streams_warn
Definition: ffmpeg.h:372
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2565
AVDictionary * decoder_opts
Definition: ffmpeg.h:291
int autorotate
Definition: ffmpeg.h:296
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:665
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1208
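A reduced sketch of opening a decoder from a stream's codec parameters, roughly what init_input_stream() relies on (open_stream_decoder is a hypothetical helper):

#include <libavcodec/avcodec.h>

static AVCodecContext *open_stream_decoder(const AVCodecParameters *par)
{
    AVCodec *codec = avcodec_find_decoder(par->codec_id);
    AVCodecContext *ctx;
    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    return ctx;
}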
int showed_multi_packet_warning
Definition: ffmpeg.h:290
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:120
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4001
#define u(width,...)
int64_t ts_offset
Definition: ffmpeg.h:365
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:266
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:844
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4081
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:457
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3648
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1631
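The canonical demux loop around this call: read until EOF and unreference every packet, whether or not it was used (handle() is a hypothetical per-packet callback):

#include <libavformat/avformat.h>

static int drain_input(AVFormatContext *ic,
                       int (*handle)(AVPacket *pkt, void *opaque), void *opaque)
{
    AVPacket pkt;
    int ret;
    while ((ret = av_read_frame(ic, &pkt)) >= 0) {
        int err = handle(&pkt, opaque);
        av_packet_unref(&pkt);              /* the demuxer's reference must always be dropped */
        if (err < 0)
            return err;
    }
    return ret == AVERROR_EOF ? 0 : ret;    /* EOF is the normal way out */
}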
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:3700
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:271
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:657
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:273
AVFrame * filtered_frame
Definition: ffmpeg.h:423
int source_index
Definition: ffmpeg.h:406
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:270
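A small sketch of allocating a writable video frame; the 32-byte alignment is a conventional choice, not something mandated here:

#include <libavutil/frame.h>

static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int width, int height)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    frame->format = fmt;
    frame->width  = width;
    frame->height = height;
    if (av_frame_get_buffer(frame, 32) < 0)   /* allocates data[] and linesize[] */
        av_frame_free(&frame);                /* sets frame back to NULL on failure */
    return frame;
}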
static volatile int received_nb_signals
Definition: ffmpeg.c:315
int copy_prior_start
Definition: ffmpeg.h:469
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:484
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1730
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:626
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:325
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2488
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1410
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:193
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:444
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:276
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2357
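A format-level seek is normally paired with flushing the decoder so no stale frames survive; a reduced sketch with a seek target given in seconds:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static int seek_and_flush(AVFormatContext *ic, AVCodecContext *dec, double seconds)
{
    int64_t ts = (int64_t)(seconds * AV_TIME_BASE);   /* stream_index -1 => AV_TIME_BASE units */
    int ret = av_seek_frame(ic, -1, ts, AVSEEK_FLAG_BACKWARD);
    if (ret >= 0 && dec)
        avcodec_flush_buffers(dec);                   /* drop frames buffered before the seek */
    return ret;
}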
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:105
int resample_sample_fmt
Definition: ffmpeg.h:301
int forced_kf_count
Definition: ffmpeg.h:441
int64_t start
Definition: avformat.h:1285
OSTFinished finished
Definition: ffmpeg.h:464
char * forced_keyframes
Definition: ffmpeg.h:443
uint64_t data_size
Definition: ffmpeg.h:346
int resample_width
Definition: ffmpeg.h:298
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:278
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1034
struct FilterGraph * graph
Definition: ffmpeg.h:237
uint64_t limit_filesize
Definition: ffmpeg.h:501
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1390
AVIOContext * progress_avio
Definition: ffmpeg.c:133
if(ret< 0)
Definition: vf_mcdeint.c:282
int main(int argc, char **argv)
Definition: ffmpeg.c:4301
int reinit_filters
Definition: ffmpeg.h:327
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:476
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:263
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1027
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:923
static double c[64]
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:934
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3900
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1048
uint64_t samples_encoded
Definition: ffmpeg.h:483
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1284
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:208
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3108
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:932
char * key
Definition: dict.h:86
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:80
AVFrame * last_frame
Definition: ffmpeg.h:424
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
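A hedged sketch of the filtergraph hand-off: push a decoded frame into the source, then try to pull a filtered frame from the sink. 'src' and 'sink' are assumed to be the already-configured buffer and buffersink contexts of the same graph:

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

static int push_pull(AVFilterContext *src, AVFilterContext *sink,
                     AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame_flags(src, in, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    /* AVERROR(EAGAIN) here simply means the graph needs more input */
    return av_buffersink_get_frame(sink, out);
}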
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1613
int copy_ts
Definition: ffmpeg_opt.c:111
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1337
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4037
AVFormatContext * ctx
Definition: ffmpeg.h:355
int pict_type
Definition: ffmpeg.h:489
AVBufferRef * hw_device_ctx
Definition: ffmpeg_opt.c:93
AVSubtitle subtitle
Definition: ffmpeg.h:310
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:731
int eof_reached
Definition: ffmpeg.h:356
Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE...
Definition: mathematics.h:76
int forced_kf_index
Definition: ffmpeg.h:442
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:795
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:487
char * avfilter
Definition: ffmpeg.h:455
uint8_t * name
Definition: ffmpeg.h:231
char * value
Definition: dict.h:87
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:323
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:99
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:713
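forced_keyframes_pexpr is evaluated with this function against the constants listed near the top of the file; a standalone sketch with illustrative constant names and values:

#include <math.h>
#include <libavutil/eval.h>

static double eval_kf_expr(const char *expr_str)
{
    static const char *const names[] = { "n", "t", NULL };
    double values[] = { 25, 1.0 };       /* illustrative: frame number, time */
    AVExpr *e = NULL;
    double d = NAN;
    if (av_expr_parse(&e, expr_str, names, NULL, NULL, NULL, NULL, 0, NULL) >= 0)
        d = av_expr_eval(e, values, NULL);
    av_expr_free(e);                     /* safe on NULL */
    return d;
}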
int channels
number of audio channels
Definition: avcodec.h:2411
int top_field_first
Definition: ffmpeg.h:434
static uint8_t tmp[8]
Definition: des.c:38
OutputFilter ** outputs
Definition: ffmpeg.h:254
InputFile ** input_files
Definition: ffmpeg.c:139
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2526
void av_log_set_flags(int arg)
Definition: log.c:396
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:219
AVFormatContext * ctx
Definition: ffmpeg.h:496
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:832
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
Definition: avcodec.h:3936
void show_usage(void)
Definition: ffmpeg_opt.c:2999
An instance of a filter.
Definition: avfilter.h:305
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:331
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1579
AVDictionary * encoder_opts
Definition: ffmpeg.h:459
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1184
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4941
int height
Definition: frame.h:236
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:252
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2407
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:341
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:660
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:334
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2271
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2910
OutputFile ** output_files
Definition: ffmpeg.c:144
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Definition: avformat.h:1006
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1699
int copy_tb
Definition: ffmpeg_opt.c:113
int64_t min_pts
Definition: ffmpeg.h:284
static volatile int received_sigterm
Definition: ffmpeg.c:314
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:261
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3661
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2138
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:313
int stream_index
Definition: avcodec.h:1582
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:913
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:330
enum AVSubtitleType type
Definition: avcodec.h:3884
int64_t first_pts
Definition: ffmpeg.h:416
int nb_inputs
Definition: ffmpeg.h:253
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:936
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:222
#define DECODING_FOR_OST
Definition: ffmpeg.h:264
int index
Definition: ffmpeg.h:405
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1101
AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
uint64_t resample_channel_layout
Definition: ffmpeg.h:304
OSTFinished
Definition: ffmpeg.h:398
This structure stores compressed data.
Definition: avcodec.h:1557
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:44
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1086
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2006
int delay
Codec delay.
Definition: avcodec.h:1819
int debug_ts
Definition: ffmpeg_opt.c:114
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3417
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if available on the CPU).
Definition: mem.c:252
static void sigterm_handler(int sig)
Definition: ffmpeg.c:321
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1573
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:122
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1706
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:240
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1479
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:831
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2, 3)
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:137
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:68
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:2648
Definition: ffmpeg.h:390
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:771
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3274