FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
65 #include "libavcodec/mathops.h"
66 #include "libavformat/os_support.h"
67 
68 # include "libavfilter/avfilter.h"
69 # include "libavfilter/buffersrc.h"
70 # include "libavfilter/buffersink.h"
71 
72 #if HAVE_SYS_RESOURCE_H
73 #include <sys/time.h>
74 #include <sys/types.h>
75 #include <sys/resource.h>
76 #elif HAVE_GETPROCESSTIMES
77 #include <windows.h>
78 #endif
79 #if HAVE_GETPROCESSMEMORYINFO
80 #include <windows.h>
81 #include <psapi.h>
82 #endif
83 #if HAVE_SETCONSOLECTRLHANDLER
84 #include <windows.h>
85 #endif
86 
87 
88 #if HAVE_SYS_SELECT_H
89 #include <sys/select.h>
90 #endif
91 
92 #if HAVE_TERMIOS_H
93 #include <fcntl.h>
94 #include <sys/ioctl.h>
95 #include <sys/time.h>
96 #include <termios.h>
97 #elif HAVE_KBHIT
98 #include <conio.h>
99 #endif
100 
101 #if HAVE_PTHREADS
102 #include <pthread.h>
103 #endif
104 
105 #include <time.h>
106 
107 #include "ffmpeg.h"
108 #include "cmdutils.h"
109 
110 #include "libavutil/avassert.h"
111 
/* Program identification consumed by cmdutils (banner/version output). */
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Handle for the -vstats log file; opened lazily elsewhere in this file. */
static FILE *vstats_file;

/* Variable names available inside -force_key_frames expressions. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static int64_t getutime(void);
static int64_t getmaxrss(void);

static int run_as_daemon  = 0;
static int nb_frames_dup = 0;       /* running count of duplicated frames */
static unsigned dup_warning = 1000; /* threshold for the next "dup" warning */
static int nb_frames_drop = 0;      /* running count of dropped frames */
static int64_t decode_error_stat[2]; /* presumably [ok, error] decode counters -- TODO confirm */

static int want_sdp = 1;

static int current_time; /* reference time for update_benchmark() */
/* NOTE(review): the global input/output stream and file table declarations
   that belong here appear to be missing from this copy of the file. */

#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty; /* saved terminal state, restored on exit */
static int restore_tty;       /* nonzero once oldtty holds a valid state */
#endif

#if HAVE_PTHREADS
static void free_input_threads(void);
#endif
167 
168 /* sub2video hack:
169  Convert subtitles to video with alpha to insert them in filter graphs.
170  This is a temporary solution until libavfilter gets real subtitles support.
171  */
172 
/* NOTE(review): the signature line is missing from this copy; presumably
   static int sub2video_get_blank_frame(InputStream *ist).
   Allocates and zeroes the sub2video canvas frame; returns 0 or a negative
   AVERROR from av_frame_get_buffer(). */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    /* Drop any previous buffer, then size the canvas from the decoder when
       it reported dimensions, falling back to the configured sub2video size. */
    av_frame_unref(frame);
    ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    /* NOTE(review): the line setting frame->format is missing in this copy. */
    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
        return ret;
    /* Clear the first plane so the canvas starts out blank. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
187 
188 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
189  AVSubtitleRect *r)
190 {
191  uint32_t *pal, *dst2;
192  uint8_t *src, *src2;
193  int x, y;
194 
195  if (r->type != SUBTITLE_BITMAP) {
196  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
197  return;
198  }
199  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
201  r->x, r->y, r->w, r->h, w, h
202  );
203  return;
204  }
205 
206  dst += r->y * dst_linesize + r->x * 4;
207  src = r->data[0];
208  pal = (uint32_t *)r->data[1];
209  for (y = 0; y < r->h; y++) {
210  dst2 = (uint32_t *)dst;
211  src2 = src;
212  for (x = 0; x < r->w; x++)
213  *(dst2++) = pal[*(src2++)];
214  dst += dst_linesize;
215  src += r->linesize[0];
216  }
217 }
218 
/* Stamp the sub2video canvas with pts and feed it to this input's filters. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++)
        /* NOTE(review): the loop body is missing in this copy; presumably it
           pushes the frame into each buffer source. */
}
231 
/* NOTE(review): the signature line is missing from this copy; presumably
   void sub2video_update(InputStream *ist, AVSubtitle *sub).
   Renders sub onto a fresh blank canvas and pushes it to the filters;
   sub == NULL pushes a clearing frame. */
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* Convert the subtitle display window into the stream time base. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* Clearing frame: start where the previous subtitle ended. */
        pts = ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the first line of this av_log() call is missing in
           this copy. */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
}
265 
266 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
267 {
268  InputFile *infile = input_files[ist->file_index];
269  int i, j, nb_reqs;
270  int64_t pts2;
271 
272  /* When a frame is read from a file, examine all sub2video streams in
273  the same file and send the sub2video frame again. Otherwise, decoded
274  video frames could be accumulating in the filter graph while a filter
275  (possibly overlay) is desperately waiting for a subtitle frame. */
276  for (i = 0; i < infile->nb_streams; i++) {
277  InputStream *ist2 = input_streams[infile->ist_index + i];
278  if (!ist2->sub2video.frame)
279  continue;
280  /* subtitles seem to be usually muxed ahead of other streams;
281  if not, subtracting a larger time here is necessary */
282  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
283  /* do not send the heartbeat frame if the subtitle is already ahead */
284  if (pts2 <= ist2->sub2video.last_pts)
285  continue;
286  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
287  sub2video_update(ist2, NULL);
288  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
289  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
290  if (nb_reqs)
291  sub2video_push_ref(ist2, pts2);
292  }
293 }
294 
/* End-of-stream handling for a sub2video input. */
static void sub2video_flush(InputStream *ist)
{
    int i;

    /* Push one final clearing frame unless the last subtitle never ends. */
    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, NULL);
    for (i = 0; i < ist->nb_filters; i++)
        /* NOTE(review): the loop body is missing in this copy; presumably it
           signals EOF to each buffer source. */
}
304 
305 /* end of sub2video hack */
306 
/* Restore the saved terminal attributes, if term_init() changed them.
 * Kept minimal (a single tcsetattr) so it can run from signal context,
 * as the name suggests. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
314 
/* Public terminal cleanup entry point: silence logging output. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): the call to term_exit_sigsafe() that belongs here is
       missing in this copy. */
}
320 
/* State shared between the signal handlers and the main loop; the handler
   writes, the main loop reads, hence volatile. */
static volatile int received_sigterm = 0;    /* last terminating signal number */
static volatile int received_nb_signals = 0; /* how many signals arrived so far */
/* NOTE(review): one declaration (presumably transcode_init_done) between
   these lines is missing in this copy. */
static volatile int ffmpeg_exited = 0;       /* set once cleanup has completed */
static int main_return_code = 0;             /* process exit status on success path */
326 
static void
/* NOTE(review): the remainder of the signature is missing in this copy;
   presumably sigterm_handler(int sig). Records the signal for the main
   loop and hard-exits after repeated signals. */
{
    received_sigterm = sig;
    /* NOTE(review): the lines incrementing received_nb_signals (and related
       bookkeeping) are missing in this copy. */
    if(received_nb_signals > 3) {
        /* User insists: bypass cleanup entirely. write() is async-signal-safe,
           unlike stdio, which is why it is used here. */
        write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
              strlen("Received > 3 system signals, hard exiting\n"));

        exit(123);
    }
}
340 
341 #if HAVE_SETCONSOLECTRLHANDLER
342 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
343 {
344  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
345 
346  switch (fdwCtrlType)
347  {
348  case CTRL_C_EVENT:
349  case CTRL_BREAK_EVENT:
350  sigterm_handler(SIGINT);
351  return TRUE;
352 
353  case CTRL_CLOSE_EVENT:
354  case CTRL_LOGOFF_EVENT:
355  case CTRL_SHUTDOWN_EVENT:
356  sigterm_handler(SIGTERM);
357  /* Basically, with these 3 events, when we return from this method the
358  process is hard terminated, so stall as long as we need to
359  to try and let the main thread(s) clean up and gracefully terminate
360  (we have at most 5 seconds, but should be done far before that). */
361  while (!ffmpeg_exited) {
362  Sleep(0);
363  }
364  return TRUE;
365 
366  default:
367  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
368  return FALSE;
369  }
370 }
371 #endif
372 
/* Put the controlling terminal into a raw-ish mode so single keypresses can
   be read, and install the signal handlers used for orderly shutdown. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    /* NOTE(review): an enclosing condition line (presumably guarding on
       interactive/non-daemon mode) is missing in this copy; the extra
       closing brace below pairs with it. */
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        oldtty = tty;
        restore_tty = 1;

        /* Disable break/CR-NL translation and flow control, keep output
           post-processing, turn off echo and canonical line buffering,
           force 8-bit characters; return after 1 byte with no timeout. */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                        |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
406 
407 /* read a key without blocking */
/* Read a key from stdin without blocking.
 * Returns the character read, or a non-positive value when no key is
 * available (the POSIX branch passes through read()/select() results). */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* Poll stdin with a zero timeout so this never blocks. */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars != 0) {
            /* Fix: check the result of read(); previously a failed/short
               read fell through and returned an uninitialized character. */
            if (read(0, &ch, 1) == 1)
                return ch;
            return -1;
        } else {
            return -1;
        }
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
458 
/* AVIOInterruptCallback hook: tells lavf to abort blocking I/O once the
   program is shutting down. */
static int decode_interrupt_cb(void *ctx)
{
    /* NOTE(review): the return statement is missing in this copy; presumably
       it compares received_nb_signals against the transcode state. */
}
463 
465 
/* Free all global state on exit: filter graphs, output files/streams,
   input files/streams, the vstats file, and the global option tables.
   NOTE(review): several free/close calls inside this function are missing
   from this copy of the file (lines elided by the extraction); the markers
   below flag the spots where the remaining text is syntactically orphaned. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Tear down every filter graph, draining queued frames/subtitles. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_inputs; j++) {
            while (av_fifo_size(fg->inputs[j]->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* NOTE(review): the av_fifo_generic_read() call line is
                       missing here; only its continuation remains. */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
            }
            av_freep(&fg->inputs[j]->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            av_freep(&fg->outputs[j]->name);
            av_freep(&fg->outputs[j]->formats);
            av_freep(&fg->outputs[j]->channel_layouts);
            av_freep(&fg->outputs[j]->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);

        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_parser_close(ost->parser);

        av_freep(&ost->forced_keyframes);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);

        if (ost->muxing_queue) {
            /* Any packets still buffered for an uninitialized muxer are
               simply discarded. */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_dict_free(&ist->decoder_opts);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* NOTE(review): the av_log() call line is missing here; only its
               arguments remain. */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
621 
/* NOTE(review): the signature line is missing from this copy; presumably
   static void remove_avoptions(AVDictionary **a, AVDictionary *b). */
{
    AVDictionaryEntry *t = NULL;

    /* Walk every entry of b. NOTE(review): the loop body (removing the
       matching key from the other dictionary) is missing in this copy. */
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
630 
/* NOTE(review): the signature line is missing from this copy; presumably
   static void assert_avoptions(AVDictionary *m). Any entry left in m means
   an option was not consumed -> fatal "Option not found" error. */
{
    /* NOTE(review): the AVDictionaryEntry *t declaration is missing here. */
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
639 
/* Terminate the program when an experimental codec was selected without
   opting in; both parameters are unused in this body. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
644 
645 static void update_benchmark(const char *fmt, ...)
646 {
647  if (do_benchmark_all) {
648  int64_t t = getutime();
649  va_list va;
650  char buf[1024];
651 
652  if (fmt) {
653  va_start(va, fmt);
654  vsnprintf(buf, sizeof(buf), fmt, va);
655  va_end(va);
656  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
657  }
658  current_time = t;
659  }
660 }
661 
662 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
663 {
664  int i;
665  for (i = 0; i < nb_output_streams; i++) {
666  OutputStream *ost2 = output_streams[i];
667  ost2->finished |= ost == ost2 ? this_stream : others;
668  }
669 }
670 
/* Hand one packet to the muxer for ost, after frame counting, timestamp
   rescaling and DTS monotonicity fixes. If the muxer header has not been
   written yet, the packet is queued instead.
   NOTE(review): several lines inside this function are missing from this
   copy (flagged below where the remaining text is orphaned). */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue (doubling, capped) or bail out if full. */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* NOTE(review): the av_log(NULL, AV_LOG_ERROR, ...) line is
                   missing here; only its arguments remain. */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_ref(&tmp_pkt, pkt);
        if (ret < 0)
            exit_program(1);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        av_packet_unref(pkt);
        return;
    }

    /* NOTE(review): the condition lines preceding this statement are missing
       in this copy. */
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* NOTE(review): the side-data lookup call line is missing here; only
           its continuation remains. */
        NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            /* DTS after PTS is invalid; replace both by a middle value. */
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* NOTE(review): the first line of this condition (codec-type check)
           is missing in this copy. */
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            /* Enforce (strictly or weakly, per muxer flags) increasing DTS. */
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* NOTE(review): the media-type argument line is missing here. */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* NOTE(review): the call closing all output streams on mux failure
           is missing in this copy. */
    }
    av_packet_unref(pkt);
}
805 
/* NOTE(review): the signature line is missing from this copy; presumably
   static void close_output_stream(OutputStream *ost). Marks the encoder
   finished and, under -shortest, clamps the file's recording time. */
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        /* Translate this stream's current end into AV_TIME_BASE units. */
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
816 
/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
/* NOTE(review): the first line of the signature is missing in this copy;
   presumably static void output_packet(OutputFile *of, AVPacket *pkt, */
                          OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters, if any */
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
        if (ret < 0)
            goto finish;

        eof = 0;
        idx = 1;
        /* Pump packets down the filter chain; idx is one past the filter we
           are currently reading from, so idx == 0 means fully drained. */
        while (idx) {
            /* get a packet from the previous filter up the chain */
            ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                idx--;
                continue;
            } else if (ret == AVERROR_EOF) {
                eof = 1;
            } else if (ret < 0)
                goto finish;

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
                if (ret < 0)
                    goto finish;
                idx++;
                eof = 0;
            } else if (eof)
                goto finish;
            else
                write_packet(of, pkt, ost, 0);
        }
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
878 
/* NOTE(review): the signature line is missing from this copy; presumably
   static int check_recording_time(OutputStream *ost). Returns 1 while the
   stream may still receive frames, 0 once -t has been reached (and closes
   the stream). */
{
    OutputFile *of = output_files[ost->file_index];

    /* NOTE(review): the middle line of this av_compare_ts()-style condition
       is missing in this copy. */
    if (of->recording_time != INT64_MAX &&
        AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;
    }
    return 1;
}
891 
/* Encode one audio frame for ost and send every resulting packet to the
   muxer. Exits the program on encoder errors. */
static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (!check_recording_time(ost))
        return;

    /* Without a usable pts (or with forced audio sync), stamp the frame
       from the running output sample counter. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* Drain every packet the encoder has ready; EAGAIN means it needs the
       next frame before more output can be produced. */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
951 
/* Encode one subtitle for ost and mux the resulting packet(s). DVB
   subtitles are encoded twice: once to draw and once to clear. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* Lazily allocate the shared encode buffer. */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* NOTE(review): the codec-id condition line (presumably testing for DVB
       subtitles) is missing in this copy. */
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* Second pass (clear packet): encode with zero rectangles. */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1034 
1035 static void do_video_out(OutputFile *of,
1036  OutputStream *ost,
1037  AVFrame *next_picture,
1038  double sync_ipts)
1039 {
1040  int ret, format_video_sync;
1041  AVPacket pkt;
1042  AVCodecContext *enc = ost->enc_ctx;
1043  AVCodecParameters *mux_par = ost->st->codecpar;
1044  AVRational frame_rate;
1045  int nb_frames, nb0_frames, i;
1046  double delta, delta0;
1047  double duration = 0;
1048  int frame_size = 0;
1049  InputStream *ist = NULL;
1051 
1052  if (ost->source_index >= 0)
1053  ist = input_streams[ost->source_index];
1054 
1055  frame_rate = av_buffersink_get_frame_rate(filter);
1056  if (frame_rate.num > 0 && frame_rate.den > 0)
1057  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1058 
1059  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1060  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1061 
1062  if (!ost->filters_script &&
1063  !ost->filters &&
1064  next_picture &&
1065  ist &&
1066  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1067  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1068  }
1069 
1070  if (!next_picture) {
1071  //end, flushing
1072  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1073  ost->last_nb0_frames[1],
1074  ost->last_nb0_frames[2]);
1075  } else {
1076  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1077  delta = delta0 + duration;
1078 
1079  /* by default, we output a single frame */
1080  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1081  nb_frames = 1;
1082 
1083  format_video_sync = video_sync_method;
1084  if (format_video_sync == VSYNC_AUTO) {
1085  if(!strcmp(of->ctx->oformat->name, "avi")) {
1086  format_video_sync = VSYNC_VFR;
1087  } else
1088  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1089  if ( ist
1090  && format_video_sync == VSYNC_CFR
1091  && input_files[ist->file_index]->ctx->nb_streams == 1
1092  && input_files[ist->file_index]->input_ts_offset == 0) {
1093  format_video_sync = VSYNC_VSCFR;
1094  }
1095  if (format_video_sync == VSYNC_CFR && copy_ts) {
1096  format_video_sync = VSYNC_VSCFR;
1097  }
1098  }
1099  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1100 
1101  if (delta0 < 0 &&
1102  delta > 0 &&
1103  format_video_sync != VSYNC_PASSTHROUGH &&
1104  format_video_sync != VSYNC_DROP) {
1105  if (delta0 < -0.6) {
1106  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1107  } else
1108  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1109  sync_ipts = ost->sync_opts;
1110  duration += delta0;
1111  delta0 = 0;
1112  }
1113 
1114  switch (format_video_sync) {
1115  case VSYNC_VSCFR:
1116  if (ost->frame_number == 0 && delta0 >= 0.5) {
1117  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1118  delta = duration;
1119  delta0 = 0;
1120  ost->sync_opts = lrint(sync_ipts);
1121  }
1122  case VSYNC_CFR:
1123  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1124  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1125  nb_frames = 0;
1126  } else if (delta < -1.1)
1127  nb_frames = 0;
1128  else if (delta > 1.1) {
1129  nb_frames = lrintf(delta);
1130  if (delta0 > 1.1)
1131  nb0_frames = lrintf(delta0 - 0.6);
1132  }
1133  break;
1134  case VSYNC_VFR:
1135  if (delta <= -0.6)
1136  nb_frames = 0;
1137  else if (delta > 0.6)
1138  ost->sync_opts = lrint(sync_ipts);
1139  break;
1140  case VSYNC_DROP:
1141  case VSYNC_PASSTHROUGH:
1142  ost->sync_opts = lrint(sync_ipts);
1143  break;
1144  default:
1145  av_assert0(0);
1146  }
1147  }
1148 
1149  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1150  nb0_frames = FFMIN(nb0_frames, nb_frames);
1151 
1152  memmove(ost->last_nb0_frames + 1,
1153  ost->last_nb0_frames,
1154  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1155  ost->last_nb0_frames[0] = nb0_frames;
1156 
1157  if (nb0_frames == 0 && ost->last_dropped) {
1158  nb_frames_drop++;
1160  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1161  ost->frame_number, ost->st->index, ost->last_frame->pts);
1162  }
1163  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1164  if (nb_frames > dts_error_threshold * 30) {
1165  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1166  nb_frames_drop++;
1167  return;
1168  }
1169  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1170  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1171  if (nb_frames_dup > dup_warning) {
1172  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1173  dup_warning *= 10;
1174  }
1175  }
1176  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1177 
1178  /* duplicates frame if needed */
1179  for (i = 0; i < nb_frames; i++) {
1180  AVFrame *in_picture;
1181  av_init_packet(&pkt);
1182  pkt.data = NULL;
1183  pkt.size = 0;
1184 
1185  if (i < nb0_frames && ost->last_frame) {
1186  in_picture = ost->last_frame;
1187  } else
1188  in_picture = next_picture;
1189 
1190  if (!in_picture)
1191  return;
1192 
1193  in_picture->pts = ost->sync_opts;
1194 
1195 #if 1
1196  if (!check_recording_time(ost))
1197 #else
1198  if (ost->frame_number >= ost->max_frames)
1199 #endif
1200  return;
1201 
1202 #if FF_API_LAVF_FMT_RAWPICTURE
1203  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1204  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1205  /* raw pictures are written as AVPicture structure to
1206  avoid any copies. We support temporarily the older
1207  method. */
1208  if (in_picture->interlaced_frame)
1209  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1210  else
1211  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1212  pkt.data = (uint8_t *)in_picture;
1213  pkt.size = sizeof(AVPicture);
1214  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
1215  pkt.flags |= AV_PKT_FLAG_KEY;
1216 
1217  output_packet(of, &pkt, ost, 0);
1218  } else
1219 #endif
1220  {
1221  int forced_keyframe = 0;
1222  double pts_time;
1223 
1225  ost->top_field_first >= 0)
1226  in_picture->top_field_first = !!ost->top_field_first;
1227 
1228  if (in_picture->interlaced_frame) {
1229  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1230  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1231  else
1232  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1233  } else
1234  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1235 
1236  in_picture->quality = enc->global_quality;
1237  in_picture->pict_type = 0;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  in_picture->pts * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1264  }
1265 
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1) {
1270  forced_keyframe = 1;
1271  }
1272 
1273  if (forced_keyframe) {
1274  in_picture->pict_type = AV_PICTURE_TYPE_I;
1275  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276  }
1277 
1279  if (debug_ts) {
1280  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283  enc->time_base.num, enc->time_base.den);
1284  }
1285 
1286  ost->frames_encoded++;
1287 
1288  ret = avcodec_send_frame(enc, in_picture);
1289  if (ret < 0)
1290  goto error;
1291 
1292  while (1) {
1293  ret = avcodec_receive_packet(enc, &pkt);
1294  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1295  if (ret == AVERROR(EAGAIN))
1296  break;
1297  if (ret < 0)
1298  goto error;
1299 
1300  if (debug_ts) {
1301  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1302  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1303  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1304  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1305  }
1306 
1307  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1308  pkt.pts = ost->sync_opts;
1309 
1310  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1311 
1312  if (debug_ts) {
1313  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1314  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1315  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1316  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1317  }
1318 
1319  frame_size = pkt.size;
1320  output_packet(of, &pkt, ost, 0);
1321 
1322  /* if two pass, output log */
1323  if (ost->logfile && enc->stats_out) {
1324  fprintf(ost->logfile, "%s", enc->stats_out);
1325  }
1326  }
1327  }
1328  ost->sync_opts++;
1329  /*
1330  * For video, number of frames in == number of packets out.
1331  * But there may be reordering, so we can't throw away frames on encoder
1332  * flush, we need to limit them here, before they go into encoder.
1333  */
1334  ost->frame_number++;
1335 
1336  if (vstats_filename && frame_size)
1337  do_video_stats(ost, frame_size);
1338  }
1339 
1340  if (!ost->last_frame)
1341  ost->last_frame = av_frame_alloc();
1342  av_frame_unref(ost->last_frame);
1343  if (next_picture && ost->last_frame)
1344  av_frame_ref(ost->last_frame, next_picture);
1345  else
1346  av_frame_free(&ost->last_frame);
1347 
1348  return;
1349 error:
1350  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1351  exit_program(1);
1352 }
1353 
1354 static double psnr(double d)
1355 {
1356  return -10.0 * log10(d);
1357 }
1358 
/*
 * Append one line of per-frame statistics for a video output stream to the
 * file named by -vstats_file. The file is opened lazily on the first call;
 * a failed fopen() terminates the program.
 *
 * NOTE(review): the signature line was dropped by the doc extraction
 * (original line 1359); presumably (OutputStream *ost, int frame_size),
 * matching the call sites visible elsewhere in this chunk -- confirm
 * against upstream ffmpeg.c.
 */
1360 {
1361  AVCodecContext *enc;
1362  int frame_number;
1363  double ti1, bitrate, avg_bitrate;
1364 
1365  /* this is executed just the first time do_video_stats is called */
1366  if (!vstats_file) {
1367  vstats_file = fopen(vstats_filename, "w");
1368  if (!vstats_file) {
1369  perror("fopen");
1370  exit_program(1);
1371  }
1372  }
1373 
1374  enc = ost->enc_ctx;
1375  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1376  frame_number = ost->st->nb_frames;
/* vstats_version 2+ additionally prefixes the output file and stream index */
1377  if (vstats_version <= 1) {
1378  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1379  ost->quality / (float)FF_QP2LAMBDA);
1380  } else {
1381  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1382  ost->quality / (float)FF_QP2LAMBDA);
1383  }
1384 
/* PSNR column only when the encoder was asked to collect error stats */
1385  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1386  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1387 
1388  fprintf(vstats_file,"f_size= %6d ", frame_size);
1389  /* compute pts value */
1390  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp to avoid dividing by ~0 in the average-bitrate computation below */
1391  if (ti1 < 0.01)
1392  ti1 = 0.01;
1393 
1394  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1395  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1396  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1397  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1398  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1399  }
1400 }
1401 
1402 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1403 
/*
 * Mark an output stream as finished; when the output file was opened with
 * -shortest, every stream of that file is finished as well.
 *
 * NOTE(review): the signature line (original 1404) and line 1409 were
 * dropped by the doc extraction. Line 1409 presumably sets
 * ost->finished for the stream itself -- verify against upstream ffmpeg.c.
 */
1405 {
1406  OutputFile *of = output_files[ost->file_index];
1407  int i;
1408 
1410 
/* -shortest: ending one stream ends every stream of the same output file */
1411  if (of->shortest) {
1412  for (i = 0; i < of->ctx->nb_streams; i++)
1413  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1414  }
1415 }
1416 
1417 /**
1418  * Get and encode new output from any of the filtergraphs, without causing
1419  * activity.
1420  *
1421  * @return 0 for success, <0 for severe errors
1422  */
1423 static int reap_filters(int flush)
1424 {
/*
 * NOTE(review): the doc extraction dropped several lines from this function
 * (original 1432, 1458, 1461, 1464); the code below is not complete as-is.
 * In particular the AVFilterContext *filter declaration and parts of the
 * av_buffersink_get_frame_flags() call are missing -- verify against
 * upstream ffmpeg.c before relying on this chunk.
 */
1425  AVFrame *filtered_frame = NULL;
1426  int i;
1427 
1428  /* Reap all buffers present in the buffer sinks */
1429  for (i = 0; i < nb_output_streams; i++) {
1430  OutputStream *ost = output_streams[i];
1431  OutputFile *of = output_files[ost->file_index];
1433  AVCodecContext *enc = ost->enc_ctx;
1434  int ret = 0;
1435 
/* skip streams with no (configured) filter graph attached */
1436  if (!ost->filter || !ost->filter->graph->graph)
1437  continue;
1438  filter = ost->filter->filter;
1439 
/* lazy stream init: first frame reaching the sink triggers encoder setup */
1440  if (!ost->initialized) {
1441  char error[1024] = "";
1442  ret = init_output_stream(ost, error, sizeof(error));
1443  if (ret < 0) {
1444  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1445  ost->file_index, ost->index, error);
1446  exit_program(1);
1447  }
1448  }
1449 
/* reuse a single scratch frame per output stream */
1450  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1451  return AVERROR(ENOMEM);
1452  }
1453  filtered_frame = ost->filtered_frame;
1454 
1455  while (1) {
1456  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1457  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1459  if (ret < 0) {
1460  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1462  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1463  } else if (flush && ret == AVERROR_EOF) {
/* on EOF during a flush, push a NULL frame so the fps code emits the last frame(s) */
1465  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1466  }
1467  break;
1468  }
1469  if (ost->finished) {
1470  av_frame_unref(filtered_frame);
1471  continue;
1472  }
1473  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1474  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1475  AVRational filter_tb = av_buffersink_get_time_base(filter);
1476  AVRational tb = enc->time_base;
/* widen the timebase denominator to keep extra fractional precision in float_pts */
1477  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1478 
1479  tb.den <<= extra_bits;
1480  float_pts =
1481  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1482  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1483  float_pts /= 1 << extra_bits;
1484  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1485  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1486 
/* the frame itself gets the pts rescaled to the encoder timebase */
1487  filtered_frame->pts =
1488  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1489  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1490  }
1491  //if (ost->source_index >= 0)
1492  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1493 
1494  switch (av_buffersink_get_type(filter)) {
1495  case AVMEDIA_TYPE_VIDEO:
1496  if (!ost->frame_aspect_ratio.num)
1497  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1498 
1499  if (debug_ts) {
1500  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1501  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1502  float_pts,
1503  enc->time_base.num, enc->time_base.den);
1504  }
1505 
1506  do_video_out(of, ost, filtered_frame, float_pts);
1507  break;
1508  case AVMEDIA_TYPE_AUDIO:
/* refuse frames whose channel layout differs from the encoder's unless it supports parameter changes */
1509  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1510  enc->channels != filtered_frame->channels) {
1512  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1513  break;
1514  }
1515  do_audio_out(of, ost, filtered_frame);
1516  break;
1517  default:
1518  // TODO support subtitle filters
1519  av_assert0(0);
1520  }
1521 
1522  av_frame_unref(filtered_frame);
1523  }
1524  }
1525 
1526  return 0;
1527 }
1528 
/**
 * Print the end-of-run summary: aggregate output sizes per media type,
 * muxing overhead percentage, and (at verbose level) per-stream packet and
 * frame counters for every input and output file.
 *
 * NOTE(review): original lines 1548-1549 were dropped by the doc extraction;
 * they presumably contain the condition that decides whether pass1_used is
 * cleared -- verify against upstream ffmpeg.c.
 *
 * @param total_size number of bytes written to the output, as determined by
 *                   the caller (print_report)
 */
1529 static void print_final_stats(int64_t total_size)
1530 {
1531  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1532  uint64_t subtitle_size = 0;
1533  uint64_t data_size = 0;
1534  float percent = -1.0;
1535  int i, j;
1536  int pass1_used = 1;
1537 
/* accumulate payload bytes per media type across all output streams */
1538  for (i = 0; i < nb_output_streams; i++) {
1539  OutputStream *ost = output_streams[i];
1540  switch (ost->enc_ctx->codec_type) {
1541  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1543  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1544  default: other_size += ost->data_size; break;
1545  }
1546  extra_size += ost->enc_ctx->extradata_size;
1547  data_size += ost->data_size;
1550  pass1_used = 0;
1551  }
1552 
/* muxing overhead = container bytes beyond the raw stream payload */
1553  if (data_size && total_size>0 && total_size >= data_size)
1554  percent = 100.0 * (total_size - data_size) / data_size;
1555 
1556  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1557  video_size / 1024.0,
1558  audio_size / 1024.0,
1559  subtitle_size / 1024.0,
1560  other_size / 1024.0,
1561  extra_size / 1024.0);
1562  if (percent >= 0.0)
1563  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1564  else
1565  av_log(NULL, AV_LOG_INFO, "unknown");
1566  av_log(NULL, AV_LOG_INFO, "\n");
1567 
1568  /* print verbose per-stream stats */
1569  for (i = 0; i < nb_input_files; i++) {
1570  InputFile *f = input_files[i];
/* note: shadows the total_size parameter inside this loop */
1571  uint64_t total_packets = 0, total_size = 0;
1572 
1573  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1574  i, f->ctx->filename);
1575 
1576  for (j = 0; j < f->nb_streams; j++) {
1577  InputStream *ist = input_streams[f->ist_index + j];
1578  enum AVMediaType type = ist->dec_ctx->codec_type;
1579 
1580  total_size += ist->data_size;
1581  total_packets += ist->nb_packets;
1582 
1583  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1584  i, j, media_type_string(type));
1585  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1586  ist->nb_packets, ist->data_size);
1587 
1588  if (ist->decoding_needed) {
1589  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1590  ist->frames_decoded);
1591  if (type == AVMEDIA_TYPE_AUDIO)
1592  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1593  av_log(NULL, AV_LOG_VERBOSE, "; ");
1594  }
1595 
1596  av_log(NULL, AV_LOG_VERBOSE, "\n");
1597  }
1598 
1599  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1600  total_packets, total_size);
1601  }
1602 
1603  for (i = 0; i < nb_output_files; i++) {
1604  OutputFile *of = output_files[i];
1605  uint64_t total_packets = 0, total_size = 0;
1606 
1607  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1608  i, of->ctx->filename);
1609 
1610  for (j = 0; j < of->ctx->nb_streams; j++) {
1611  OutputStream *ost = output_streams[of->ost_index + j];
1612  enum AVMediaType type = ost->enc_ctx->codec_type;
1613 
1614  total_size += ost->data_size;
1615  total_packets += ost->packets_written;
1616 
1617  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1618  i, j, media_type_string(type));
1619  if (ost->encoding_needed) {
1620  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1621  ost->frames_encoded);
1622  if (type == AVMEDIA_TYPE_AUDIO)
1623  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1624  av_log(NULL, AV_LOG_VERBOSE, "; ");
1625  }
1626 
1627  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1628  ost->packets_written, ost->data_size);
1629 
1630  av_log(NULL, AV_LOG_VERBOSE, "\n");
1631  }
1632 
1633  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1634  total_packets, total_size);
1635  }
/* warn when nothing at all was written; hint at -ss/-t/-frames unless a
 * two-pass first pass legitimately produced no payload */
1636  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1637  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1638  if (pass1_used) {
1639  av_log(NULL, AV_LOG_WARNING, "\n");
1640  } else {
1641  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1642  }
1643  }
1644 }
1645 
/**
 * Emit the periodic one-line status report (frame/fps/q/size/time/bitrate/
 * speed) to stderr or the log, and the machine-readable key=value progress
 * stream to progress_avio when -progress is in use. Unless is_last_report
 * is set, updates are throttled to one per 500ms. On the last report the
 * final statistics are printed as well.
 *
 * NOTE(review): original lines 1755, 1796, 1798 and 1829 were dropped by
 * the doc extraction (around the min-pts computation and the dup/drop
 * printout) -- verify against upstream ffmpeg.c.
 */
1646 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1647 {
1648  char buf[1024];
1649  AVBPrint buf_script;
1650  OutputStream *ost;
1651  AVFormatContext *oc;
1652  int64_t total_size;
1653  AVCodecContext *enc;
1654  int frame_number, vid, i;
1655  double bitrate;
1656  double speed;
1657  int64_t pts = INT64_MIN + 1;
1658  static int64_t last_time = -1;
1659  static int qp_histogram[52];
1660  int hours, mins, secs, us;
1661  int ret;
1662  float t;
1663 
1664  if (!print_stats && !is_last_report && !progress_avio)
1665  return;
1666 
/* rate-limit intermediate reports to one every 500ms */
1667  if (!is_last_report) {
1668  if (last_time == -1) {
1669  last_time = cur_time;
1670  return;
1671  }
1672  if ((cur_time - last_time) < 500000)
1673  return;
1674  last_time = cur_time;
1675  }
1676 
/* wall-clock seconds since transcoding started */
1677  t = (cur_time-timer_start) / 1000000.0;
1678 
1679 
1680  oc = output_files[0]->ctx;
1681 
1682  total_size = avio_size(oc->pb);
1683  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1684  total_size = avio_tell(oc->pb);
1685 
1686  buf[0] = '\0';
1687  vid = 0;
1688  av_bprint_init(&buf_script, 0, 1);
1689  for (i = 0; i < nb_output_streams; i++) {
1690  float q = -1;
1691  ost = output_streams[i];
1692  enc = ost->enc_ctx;
1693  if (!ost->stream_copy)
1694  q = ost->quality / (float) FF_QP2LAMBDA;
1695 
/* only the first video stream contributes the frame/fps columns */
1696  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1697  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1698  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1699  ost->file_index, ost->index, q);
1700  }
1701  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1702  float fps;
1703 
1704  frame_number = ost->frame_number;
1705  fps = t > 1 ? frame_number / t : 0;
1706  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1707  frame_number, fps < 9.95, fps, q);
1708  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1709  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1710  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1711  ost->file_index, ost->index, q);
1712  if (is_last_report)
1713  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
/* -qp_hist: print a log2 histogram of quantizer values seen so far */
1714  if (qp_hist) {
1715  int j;
1716  int qp = lrintf(q);
1717  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1718  qp_histogram[qp]++;
1719  for (j = 0; j < 32; j++)
1720  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1721  }
1722 
1723  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1724  int j;
1725  double error, error_sum = 0;
1726  double scale, scale_sum = 0;
1727  double p;
1728  char type[3] = { 'Y','U','V' };
1729  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1730  for (j = 0; j < 3; j++) {
1731  if (is_last_report) {
1732  error = enc->error[j];
1733  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1734  } else {
1735  error = ost->error[j];
1736  scale = enc->width * enc->height * 255.0 * 255.0;
1737  }
/* chroma planes are subsampled: quarter the scale for U and V */
1738  if (j)
1739  scale /= 4;
1740  error_sum += error;
1741  scale_sum += scale;
1742  p = psnr(error / scale);
1743  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1744  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1745  ost->file_index, ost->index, type[j] | 32, p);
1746  }
1747  p = psnr(error_sum / scale_sum);
1748  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1749  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1750  ost->file_index, ost->index, p);
1751  }
1752  vid = 1;
1753  }
1754  /* compute min output value */
1756  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1757  ost->st->time_base, AV_TIME_BASE_Q));
1758  if (is_last_report)
1759  nb_frames_drop += ost->last_dropped;
1760  }
1761 
/* split pts (in AV_TIME_BASE units) into hours:minutes:seconds.microseconds */
1762  secs = FFABS(pts) / AV_TIME_BASE;
1763  us = FFABS(pts) % AV_TIME_BASE;
1764  mins = secs / 60;
1765  secs %= 60;
1766  hours = mins / 60;
1767  mins %= 60;
1768 
1769  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 
1772  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1773  "size=N/A time=");
1774  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1775  "size=%8.0fkB time=", total_size / 1024.0);
1776  if (pts < 0)
1777  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1778  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1779  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1780  (100 * us) / AV_TIME_BASE);
1781 
1782  if (bitrate < 0) {
1783  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1784  av_bprintf(&buf_script, "bitrate=N/A\n");
1785  }else{
1786  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1787  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1788  }
1789 
1790  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1791  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1792  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1793  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1794  hours, mins, secs, us);
1795 
1797  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1799  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1800  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1801 
1802  if (speed < 0) {
1803  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1804  av_bprintf(&buf_script, "speed=N/A\n");
1805  } else {
1806  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1807  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1808  }
1809 
/* '\r' keeps intermediate reports on one terminal line; '\n' on the last */
1810  if (print_stats || is_last_report) {
1811  const char end = is_last_report ? '\n' : '\r';
1812  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1813  fprintf(stderr, "%s %c", buf, end);
1814  } else
1815  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1816 
1817  fflush(stderr);
1818  }
1819 
1820  if (progress_avio) {
1821  av_bprintf(&buf_script, "progress=%s\n",
1822  is_last_report ? "end" : "continue");
1823  avio_write(progress_avio, buf_script.str,
1824  FFMIN(buf_script.len, buf_script.size - 1));
1825  avio_flush(progress_avio);
1826  av_bprint_finalize(&buf_script, NULL);
1827  if (is_last_report) {
1828  if ((ret = avio_closep(&progress_avio)) < 0)
1830  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1831  }
1832  }
1833 
1834  if (is_last_report)
1835  print_final_stats(total_size);
1836 }
1837 
/*
 * Drain every encoder at end of stream: initialize streams that never
 * received data, then send a NULL (flush) frame to each used encoder and
 * mux all remaining delayed packets until AVERROR_EOF.
 *
 * NOTE(review): original lines 1856, 1878, 1905, 1928 and 1961 were dropped
 * by the doc extraction (parts of log calls and guard conditions); the code
 * below is incomplete as-is -- verify against upstream ffmpeg.c.
 */
1838 static void flush_encoders(void)
1839 {
1840  int i, ret;
1841 
1842  for (i = 0; i < nb_output_streams; i++) {
1843  OutputStream *ost = output_streams[i];
1844  AVCodecContext *enc = ost->enc_ctx;
1845  OutputFile *of = output_files[ost->file_index];
1846 
1847  if (!ost->encoding_needed)
1848  continue;
1849 
1850  // Try to enable encoding with no input frames.
1851  // Maybe we should just let encoding fail instead.
1852  if (!ost->initialized) {
1853  FilterGraph *fg = ost->filter->graph;
1854  char error[1024] = "";
1855 
1857  "Finishing stream %d:%d without any data written to it.\n",
1858  ost->file_index, ost->st->index);
1859 
/* the filter graph never configured: fabricate input parameters from the
 * demuxer-provided codecpar so configuration can proceed */
1860  if (ost->filter && !fg->graph) {
1861  int x;
1862  for (x = 0; x < fg->nb_inputs; x++) {
1863  InputFilter *ifilter = fg->inputs[x];
1864  if (ifilter->format < 0) {
1865  AVCodecParameters *par = ifilter->ist->st->codecpar;
1866  // We never got any input. Set a fake format, which will
1867  // come from libavformat.
1868  ifilter->format = par->format;
1869  ifilter->sample_rate = par->sample_rate;
1870  ifilter->channels = par->channels;
1871  ifilter->channel_layout = par->channel_layout;
1872  ifilter->width = par->width;
1873  ifilter->height = par->height;
1874  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1875  }
1876  }
1877 
1879  continue;
1880 
1881  ret = configure_filtergraph(fg);
1882  if (ret < 0) {
1883  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1884  exit_program(1);
1885  }
1886 
1887  finish_output_stream(ost);
1888  }
1889 
1890  ret = init_output_stream(ost, error, sizeof(error));
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893  ost->file_index, ost->index, error);
1894  exit_program(1);
1895  }
1896  }
1897 
/* PCM-style audio encoders (frame_size <= 1) buffer nothing to flush */
1898  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1899  continue;
1900 #if FF_API_LAVF_FMT_RAWPICTURE
1901  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1902  continue;
1903 #endif
1904 
1906  continue;
1907 
1908  for (;;) {
1909  const char *desc = NULL;
1910  AVPacket pkt;
1911  int pkt_size;
1912 
1913  switch (enc->codec_type) {
1914  case AVMEDIA_TYPE_AUDIO:
1915  desc = "audio";
1916  break;
1917  case AVMEDIA_TYPE_VIDEO:
1918  desc = "video";
1919  break;
1920  default:
1921  av_assert0(0);
1922  }
1923 
1924  av_init_packet(&pkt);
1925  pkt.data = NULL;
1926  pkt.size = 0;
1927 
1929 
/* keep sending the NULL flush frame until the encoder yields a packet or EOF */
1930  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1931  ret = avcodec_send_frame(enc, NULL);
1932  if (ret < 0) {
1933  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1934  desc,
1935  av_err2str(ret));
1936  exit_program(1);
1937  }
1938  }
1939 
1940  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1941  if (ret < 0 && ret != AVERROR_EOF) {
1942  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1943  desc,
1944  av_err2str(ret));
1945  exit_program(1);
1946  }
1947  if (ost->logfile && enc->stats_out) {
1948  fprintf(ost->logfile, "%s", enc->stats_out);
1949  }
/* EOF: signal end-of-stream to the muxer and stop draining this encoder */
1950  if (ret == AVERROR_EOF) {
1951  output_packet(of, &pkt, ost, 1);
1952  break;
1953  }
1954  if (ost->finished & MUXER_FINISHED) {
1955  av_packet_unref(&pkt);
1956  continue;
1957  }
1958  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1959  pkt_size = pkt.size;
1960  output_packet(of, &pkt, ost, 0);
1962  do_video_stats(ost, pkt_size);
1963  }
1964  }
1965  }
1966 }
1967 
1968 /*
1969  * Check whether a packet from ist should be written into ost at this time
1970  */
/*
 * Returns 1 when the packet should be written, 0 otherwise.
 * NOTE(review): the signature line was dropped by the doc extraction
 * (original line 1971); presumably (InputStream *ist, OutputStream *ost)
 * given the body below -- confirm against upstream ffmpeg.c.
 */
1972 {
1973  OutputFile *of = output_files[ost->file_index];
1974  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1975 
/* only packets from the stream this output was mapped to are eligible */
1976  if (ost->source_index != ist_index)
1977  return 0;
1978 
1979  if (ost->finished)
1980  return 0;
1981 
/* drop packets that precede the requested output start time (-ss on output) */
1982  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1983  return 0;
1984 
1985  return 1;
1986 }
1987 
/*
 * Copy one input packet to an output stream without re-encoding (-c copy):
 * rescales timestamps into the muxer timebase, applies the output start-time
 * offset, enforces recording-time limits, and runs the parser-based
 * bitstream adjustment for codecs that need it.
 *
 * NOTE(review): original lines 2000, 2045, 2049 and 2065 were dropped by
 * the doc extraction (parts of the keyframe-wait condition, the audio
 * duration computation, and the av_parser_change() argument list) --
 * verify against upstream ffmpeg.c.
 */
1988 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1989 {
1990  OutputFile *of = output_files[ost->file_index];
1991  InputFile *f = input_files [ist->file_index];
1992  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1993  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1994  AVPicture pict;
1995  AVPacket opkt;
1996 
1997  av_init_packet(&opkt);
1998 
/* before the first frame is written, wait for a keyframe (condition partly
 * on a dropped line) */
1999  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2001  return;
2002 
/* unless -copypriorss, skip packets before the effective start point */
2003  if (!ost->frame_number && !ost->copy_prior_start) {
2004  int64_t comp_start = start_time;
2005  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2006  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2007  if (pkt->pts == AV_NOPTS_VALUE ?
2008  ist->pts < comp_start :
2009  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2010  return;
2011  }
2012 
/* output-side -t: close the stream once the recording window is exceeded */
2013  if (of->recording_time != INT64_MAX &&
2014  ist->pts >= of->recording_time + start_time) {
2015  close_output_stream(ost);
2016  return;
2017  }
2018 
/* input-side -t limit, measured against the input file's own start time */
2019  if (f->recording_time != INT64_MAX) {
2020  start_time = f->ctx->start_time;
2021  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2022  start_time += f->start_time;
2023  if (ist->pts >= f->recording_time + start_time) {
2024  close_output_stream(ost);
2025  return;
2026  }
2027  }
2028 
2029  /* force the input stream PTS */
2030  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2031  ost->sync_opts++;
2032 
2033  if (pkt->pts != AV_NOPTS_VALUE)
2034  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2035  else
2036  opkt.pts = AV_NOPTS_VALUE;
2037 
/* missing dts: fall back to the input stream's tracked dts (AV_TIME_BASE_Q) */
2038  if (pkt->dts == AV_NOPTS_VALUE)
2039  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2040  else
2041  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2042  opkt.dts -= ost_tb_start_time;
2043 
2044  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
2046  if(!duration)
2047  duration = ist->dec_ctx->frame_size;
2048  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2050  ost->mux_timebase) - ost_tb_start_time;
2051  }
2052 
2053  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2054 
2055  opkt.flags = pkt->flags;
2056  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
2057  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
2058  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
2059  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
2060  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
2061  ) {
2062  int ret = av_parser_change(ost->parser, ost->parser_avctx,
2063  &opkt.data, &opkt.size,
2064  pkt->data, pkt->size,
2066  if (ret < 0) {
2067  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
2068  av_err2str(ret));
2069  exit_program(1);
2070  }
/* nonzero return: the parser allocated a new buffer; give opkt ownership */
2071  if (ret) {
2072  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
2073  if (!opkt.buf)
2074  exit_program(1);
2075  }
2076  } else {
2077  opkt.data = pkt->data;
2078  opkt.size = pkt->size;
2079  }
2080  av_copy_packet_side_data(&opkt, pkt);
2081 
2082 #if FF_API_LAVF_FMT_RAWPICTURE
2083  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
2084  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
2085  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
2086  /* store AVPicture in AVPacket, as expected by the output format */
2087  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
2088  if (ret < 0) {
2089  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
2090  av_err2str(ret));
2091  exit_program(1);
2092  }
2093  opkt.data = (uint8_t *)&pict;
2094  opkt.size = sizeof(AVPicture);
2095  opkt.flags |= AV_PKT_FLAG_KEY;
2096  }
2097 #endif
2098 
2099  output_packet(of, &opkt, ost, 0);
2100 }
2101 
/*
 * Try to pick a channel layout for an input audio stream that did not
 * declare one; logs a warning with the guessed layout. Returns 1 on
 * success (or if a layout was already set), 0 when no guess could be made.
 *
 * NOTE(review): the signature line (original 2102) and line 2111 were
 * dropped by the doc extraction. Line 2111 presumably assigns the guessed
 * layout, likely via av_get_default_channel_layout(dec->channels) --
 * verify against upstream ffmpeg.c.
 */
2103 {
2104  AVCodecContext *dec = ist->dec_ctx;
2105 
2106  if (!dec->channel_layout) {
2107  char layout_name[256];
2108 
/* -guess_layout_max caps how many channels we are willing to guess for */
2109  if (dec->channels > ist->guess_layout_max)
2110  return 0;
2112  if (!dec->channel_layout)
2113  return 0;
2114  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2115  dec->channels, dec->channel_layout);
2116  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2117  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2118  }
2119  return 1;
2120 }
2121 
/*
 * Update the decode-error statistics after a decode attempt and, when
 * -xerror is in effect, abort the program on a decode error or on a
 * corrupt decoded frame.
 *
 * NOTE(review): original line 2131 was dropped by the doc extraction; it
 * presumably holds the inner condition that tests the decoded frame for
 * corruption before the fatal log below -- verify against upstream ffmpeg.c.
 */
2122 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2123 {
/* decode_error_stat[0] counts successes, [1] counts failures */
2124  if (*got_output || ret<0)
2125  decode_error_stat[ret<0] ++;
2126 
2127  if (ret < 0 && exit_on_error)
2128  exit_program(1);
2129 
2130  if (exit_on_error && *got_output && ist) {
2132  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2133  exit_program(1);
2134  }
2135  }
2136 }
2137 
2138 // Filters can be configured only if the formats of all inputs are known.
2140 {
2141  int i;
2142  for (i = 0; i < fg->nb_inputs; i++) {
2143  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2144  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2145  return 0;
2146  }
2147  return 1;
2148 }
2149 
2151 {
2152  FilterGraph *fg = ifilter->graph;
2153  int need_reinit, ret, i;
2154 
2155  /* determine if the parameters for this input changed */
2156  need_reinit = ifilter->format != frame->format;
2157  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2158  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2159  need_reinit = 1;
2160 
2161  switch (ifilter->ist->st->codecpar->codec_type) {
2162  case AVMEDIA_TYPE_AUDIO:
2163  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2164  ifilter->channels != frame->channels ||
2165  ifilter->channel_layout != frame->channel_layout;
2166  break;
2167  case AVMEDIA_TYPE_VIDEO:
2168  need_reinit |= ifilter->width != frame->width ||
2169  ifilter->height != frame->height;
2170  break;
2171  }
2172 
2173  if (need_reinit) {
2174  ret = ifilter_parameters_from_frame(ifilter, frame);
2175  if (ret < 0)
2176  return ret;
2177  }
2178 
2179  /* (re)init the graph if possible, otherwise buffer the frame and return */
2180  if (need_reinit || !fg->graph) {
2181  for (i = 0; i < fg->nb_inputs; i++) {
2182  if (!ifilter_has_all_input_formats(fg)) {
2183  AVFrame *tmp = av_frame_clone(frame);
2184  if (!tmp)
2185  return AVERROR(ENOMEM);
2186  av_frame_unref(frame);
2187 
2188  if (!av_fifo_space(ifilter->frame_queue)) {
2189  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2190  if (ret < 0) {
2191  av_frame_free(&tmp);
2192  return ret;
2193  }
2194  }
2195  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2196  return 0;
2197  }
2198  }
2199 
2200  ret = reap_filters(1);
2201  if (ret < 0 && ret != AVERROR_EOF) {
2202  char errbuf[128];
2203  av_strerror(ret, errbuf, sizeof(errbuf));
2204 
2205  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
2206  return ret;
2207  }
2208 
2209  ret = configure_filtergraph(fg);
2210  if (ret < 0) {
2211  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2212  return ret;
2213  }
2214  }
2215 
2217  if (ret < 0) {
2218  if (ret != AVERROR_EOF)
2219  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2220  return ret;
2221  }
2222 
2223  return 0;
2224 }
2225 
2226 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2227 {
2228  int i, j, ret;
2229 
2230  ifilter->eof = 1;
2231 
2232  if (ifilter->filter) {
2233  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2234  if (ret < 0)
2235  return ret;
2236  } else {
2237  // the filtergraph was never configured
2238  FilterGraph *fg = ifilter->graph;
2239  for (i = 0; i < fg->nb_inputs; i++)
2240  if (!fg->inputs[i]->eof)
2241  break;
2242  if (i == fg->nb_inputs) {
2243  // All the input streams have finished without the filtergraph
2244  // ever being configured.
2245  // Mark the output streams as finished.
2246  for (j = 0; j < fg->nb_outputs; j++)
2247  finish_output_stream(fg->outputs[j]->ost);
2248  }
2249  }
2250 
2251  return 0;
2252 }
2253 
2254 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2255 // There is the following difference: if you got a frame, you must call
2256 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2257 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2258 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2259 {
2260  int ret;
2261 
2262  *got_frame = 0;
2263 
2264  if (pkt) {
2265  ret = avcodec_send_packet(avctx, pkt);
2266  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2267  // decoded frames with avcodec_receive_frame() until done.
2268  if (ret < 0 && ret != AVERROR_EOF)
2269  return ret;
2270  }
2271 
2272  ret = avcodec_receive_frame(avctx, frame);
2273  if (ret < 0 && ret != AVERROR(EAGAIN))
2274  return ret;
2275  if (ret >= 0)
2276  *got_frame = 1;
2277 
2278  return 0;
2279 }
2280 
2281 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2282 {
2283  int i, ret;
2284  AVFrame *f;
2285 
2286  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287  for (i = 0; i < ist->nb_filters; i++) {
2288  if (i < ist->nb_filters - 1) {
2289  f = ist->filter_frame;
2290  ret = av_frame_ref(f, decoded_frame);
2291  if (ret < 0)
2292  break;
2293  } else
2294  f = decoded_frame;
2295  ret = ifilter_send_frame(ist->filters[i], f);
2296  if (ret == AVERROR_EOF)
2297  ret = 0; /* ignore */
2298  if (ret < 0) {
2300  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2301  break;
2302  }
2303  }
2304  return ret;
2305 }
2306 
2307 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2308  int *decode_failed)
2309 {
2310  AVFrame *decoded_frame;
2311  AVCodecContext *avctx = ist->dec_ctx;
2312  int ret, err = 0;
2313  AVRational decoded_frame_tb;
2314 
2315  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2316  return AVERROR(ENOMEM);
2317  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2318  return AVERROR(ENOMEM);
2319  decoded_frame = ist->decoded_frame;
2320 
2322  ret = decode(avctx, decoded_frame, got_output, pkt);
2323  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2324  if (ret < 0)
2325  *decode_failed = 1;
2326 
2327  if (ret >= 0 && avctx->sample_rate <= 0) {
2328  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2329  ret = AVERROR_INVALIDDATA;
2330  }
2331 
2332  if (ret != AVERROR_EOF)
2333  check_decode_result(ist, got_output, ret);
2334 
2335  if (!*got_output || ret < 0)
2336  return ret;
2337 
2338  ist->samples_decoded += decoded_frame->nb_samples;
2339  ist->frames_decoded++;
2340 
2341 #if 1
2342  /* increment next_dts to use for the case where the input stream does not
2343  have timestamps or there are multiple frames in the packet */
2344  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2345  avctx->sample_rate;
2346  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2347  avctx->sample_rate;
2348 #endif
2349 
2350  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2351  decoded_frame_tb = ist->st->time_base;
2352  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2353  decoded_frame->pts = pkt->pts;
2354  decoded_frame_tb = ist->st->time_base;
2355  }else {
2356  decoded_frame->pts = ist->dts;
2357  decoded_frame_tb = AV_TIME_BASE_Q;
2358  }
2359  if (decoded_frame->pts != AV_NOPTS_VALUE)
2360  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2361  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2362  (AVRational){1, avctx->sample_rate});
2363  ist->nb_samples = decoded_frame->nb_samples;
2364  err = send_frame_to_filters(ist, decoded_frame);
2365 
2366  av_frame_unref(ist->filter_frame);
2367  av_frame_unref(decoded_frame);
2368  return err < 0 ? err : ret;
2369 }
2370 
2371 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2372  int *decode_failed)
2373 {
2374  AVFrame *decoded_frame;
2375  int i, ret = 0, err = 0;
2376  int64_t best_effort_timestamp;
2377  int64_t dts = AV_NOPTS_VALUE;
2378  AVPacket avpkt;
2379 
2380  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2381  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2382  // skip the packet.
2383  if (!eof && pkt && pkt->size == 0)
2384  return 0;
2385 
2386  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2387  return AVERROR(ENOMEM);
2388  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2389  return AVERROR(ENOMEM);
2390  decoded_frame = ist->decoded_frame;
2391  if (ist->dts != AV_NOPTS_VALUE)
2392  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2393  if (pkt) {
2394  avpkt = *pkt;
2395  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2396  }
2397 
2398  // The old code used to set dts on the drain packet, which does not work
2399  // with the new API anymore.
2400  if (eof) {
2401  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2402  if (!new)
2403  return AVERROR(ENOMEM);
2404  ist->dts_buffer = new;
2405  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2406  }
2407 
2409  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2410  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2411  if (ret < 0)
2412  *decode_failed = 1;
2413 
2414  // The following line may be required in some cases where there is no parser
2415  // or the parser does not has_b_frames correctly
2416  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2417  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2418  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2419  } else
2421  "video_delay is larger in decoder than demuxer %d > %d.\n"
2422  "If you want to help, upload a sample "
2423  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2424  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2425  ist->dec_ctx->has_b_frames,
2426  ist->st->codecpar->video_delay);
2427  }
2428 
2429  if (ret != AVERROR_EOF)
2430  check_decode_result(ist, got_output, ret);
2431 
2432  if (*got_output && ret >= 0) {
2433  if (ist->dec_ctx->width != decoded_frame->width ||
2434  ist->dec_ctx->height != decoded_frame->height ||
2435  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2436  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2437  decoded_frame->width,
2438  decoded_frame->height,
2439  decoded_frame->format,
2440  ist->dec_ctx->width,
2441  ist->dec_ctx->height,
2442  ist->dec_ctx->pix_fmt);
2443  }
2444  }
2445 
2446  if (!*got_output || ret < 0)
2447  return ret;
2448 
2449  if(ist->top_field_first>=0)
2450  decoded_frame->top_field_first = ist->top_field_first;
2451 
2452  ist->frames_decoded++;
2453 
2454  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2455  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2456  if (err < 0)
2457  goto fail;
2458  }
2459  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2460 
2461  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2462  *duration_pts = decoded_frame->pkt_duration;
2463 
2464  if (ist->framerate.num)
2465  best_effort_timestamp = ist->cfr_next_pts++;
2466 
2467  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2468  best_effort_timestamp = ist->dts_buffer[0];
2469 
2470  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2471  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2472  ist->nb_dts_buffer--;
2473  }
2474 
2475  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2476  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2477 
2478  if (ts != AV_NOPTS_VALUE)
2479  ist->next_pts = ist->pts = ts;
2480  }
2481 
2482  if (debug_ts) {
2483  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2484  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2485  ist->st->index, av_ts2str(decoded_frame->pts),
2486  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2487  best_effort_timestamp,
2488  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2489  decoded_frame->key_frame, decoded_frame->pict_type,
2490  ist->st->time_base.num, ist->st->time_base.den);
2491  }
2492 
2493  if (ist->st->sample_aspect_ratio.num)
2494  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2495 
2496  err = send_frame_to_filters(ist, decoded_frame);
2497 
2498 fail:
2500  av_frame_unref(decoded_frame);
2501  return err < 0 ? err : ret;
2502 }
2503 
2504 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2505  int *decode_failed)
2506 {
2507  AVSubtitle subtitle;
2508  int free_sub = 1;
2509  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2510  &subtitle, got_output, pkt);
2511 
2512  check_decode_result(NULL, got_output, ret);
2513 
2514  if (ret < 0 || !*got_output) {
2515  *decode_failed = 1;
2516  if (!pkt->size)
2517  sub2video_flush(ist);
2518  return ret;
2519  }
2520 
2521  if (ist->fix_sub_duration) {
2522  int end = 1;
2523  if (ist->prev_sub.got_output) {
2524  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2525  1000, AV_TIME_BASE);
2526  if (end < ist->prev_sub.subtitle.end_display_time) {
2527  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2528  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2530  end <= 0 ? ", dropping it" : "");
2532  }
2533  }
2534  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2535  FFSWAP(int, ret, ist->prev_sub.ret);
2536  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2537  if (end <= 0)
2538  goto out;
2539  }
2540 
2541  if (!*got_output)
2542  return ret;
2543 
2544  if (ist->sub2video.frame) {
2545  sub2video_update(ist, &subtitle);
2546  } else if (ist->nb_filters) {
2547  if (!ist->sub2video.sub_queue)
2548  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2549  if (!ist->sub2video.sub_queue)
2550  exit_program(1);
2551  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2553  if (ret < 0)
2554  exit_program(1);
2555  }
2556  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2557  free_sub = 0;
2558  }
2559 
2560  if (!subtitle.num_rects)
2561  goto out;
2562 
2563  ist->frames_decoded++;
2564 
2565  for (i = 0; i < nb_output_streams; i++) {
2566  OutputStream *ost = output_streams[i];
2567 
2568  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2569  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2570  continue;
2571 
2572  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2573  }
2574 
2575 out:
2576  if (free_sub)
2577  avsubtitle_free(&subtitle);
2578  return ret;
2579 }
2580 
2582 {
2583  int i, ret;
2584  /* TODO keep pts also in stream time base to avoid converting back */
2585  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2587 
2588  for (i = 0; i < ist->nb_filters; i++) {
2589  ret = ifilter_send_eof(ist->filters[i], pts);
2590  if (ret < 0)
2591  return ret;
2592  }
2593  return 0;
2594 }
2595 
2596 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2597 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2598 {
2599  int ret = 0, i;
2600  int repeating = 0;
2601  int eof_reached = 0;
2602 
2603  AVPacket avpkt;
2604  if (!ist->saw_first_ts) {
2605  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2606  ist->pts = 0;
2607  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2608  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2609  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2610  }
2611  ist->saw_first_ts = 1;
2612  }
2613 
2614  if (ist->next_dts == AV_NOPTS_VALUE)
2615  ist->next_dts = ist->dts;
2616  if (ist->next_pts == AV_NOPTS_VALUE)
2617  ist->next_pts = ist->pts;
2618 
2619  if (!pkt) {
2620  /* EOF handling */
2621  av_init_packet(&avpkt);
2622  avpkt.data = NULL;
2623  avpkt.size = 0;
2624  } else {
2625  avpkt = *pkt;
2626  }
2627 
2628  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2629  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2630  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2631  ist->next_pts = ist->pts = ist->dts;
2632  }
2633 
2634  // while we have more to decode or while the decoder did output something on EOF
2635  while (ist->decoding_needed) {
2636  int64_t duration_dts = 0;
2637  int64_t duration_pts = 0;
2638  int got_output = 0;
2639  int decode_failed = 0;
2640 
2641  ist->pts = ist->next_pts;
2642  ist->dts = ist->next_dts;
2643 
2644  switch (ist->dec_ctx->codec_type) {
2645  case AVMEDIA_TYPE_AUDIO:
2646  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2647  &decode_failed);
2648  break;
2649  case AVMEDIA_TYPE_VIDEO:
2650  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2651  &decode_failed);
2652  if (!repeating || !pkt || got_output) {
2653  if (pkt && pkt->duration) {
2654  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2655  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2657  duration_dts = ((int64_t)AV_TIME_BASE *
2658  ist->dec_ctx->framerate.den * ticks) /
2660  }
2661 
2662  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2663  ist->next_dts += duration_dts;
2664  }else
2665  ist->next_dts = AV_NOPTS_VALUE;
2666  }
2667 
2668  if (got_output) {
2669  if (duration_pts > 0) {
2670  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2671  } else {
2672  ist->next_pts += duration_dts;
2673  }
2674  }
2675  break;
2676  case AVMEDIA_TYPE_SUBTITLE:
2677  if (repeating)
2678  break;
2679  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2680  if (!pkt && ret >= 0)
2681  ret = AVERROR_EOF;
2682  break;
2683  default:
2684  return -1;
2685  }
2686 
2687  if (ret == AVERROR_EOF) {
2688  eof_reached = 1;
2689  break;
2690  }
2691 
2692  if (ret < 0) {
2693  if (decode_failed) {
2694  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2695  ist->file_index, ist->st->index, av_err2str(ret));
2696  } else {
2697  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2698  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2699  }
2700  if (!decode_failed || exit_on_error)
2701  exit_program(1);
2702  break;
2703  }
2704 
2705  if (got_output)
2706  ist->got_output = 1;
2707 
2708  if (!got_output)
2709  break;
2710 
2711  // During draining, we might get multiple output frames in this loop.
2712  // ffmpeg.c does not drain the filter chain on configuration changes,
2713  // which means if we send multiple frames at once to the filters, and
2714  // one of those frames changes configuration, the buffered frames will
2715  // be lost. This can upset certain FATE tests.
2716  // Decode only 1 frame per call on EOF to appease these FATE tests.
2717  // The ideal solution would be to rewrite decoding to use the new
2718  // decoding API in a better way.
2719  if (!pkt)
2720  break;
2721 
2722  repeating = 1;
2723  }
2724 
2725  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2726  /* except when looping we need to flush but not to send an EOF */
2727  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2728  int ret = send_filter_eof(ist);
2729  if (ret < 0) {
2730  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2731  exit_program(1);
2732  }
2733  }
2734 
2735  /* handle stream copy */
2736  if (!ist->decoding_needed) {
2737  ist->dts = ist->next_dts;
2738  switch (ist->dec_ctx->codec_type) {
2739  case AVMEDIA_TYPE_AUDIO:
2740  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2741  ist->dec_ctx->sample_rate;
2742  break;
2743  case AVMEDIA_TYPE_VIDEO:
2744  if (ist->framerate.num) {
2745  // TODO: Remove work-around for c99-to-c89 issue 7
2746  AVRational time_base_q = AV_TIME_BASE_Q;
2747  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2748  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2749  } else if (pkt->duration) {
2750  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2751  } else if(ist->dec_ctx->framerate.num != 0) {
2752  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2753  ist->next_dts += ((int64_t)AV_TIME_BASE *
2754  ist->dec_ctx->framerate.den * ticks) /
2756  }
2757  break;
2758  }
2759  ist->pts = ist->dts;
2760  ist->next_pts = ist->next_dts;
2761  }
2762  for (i = 0; pkt && i < nb_output_streams; i++) {
2763  OutputStream *ost = output_streams[i];
2764 
2765  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2766  continue;
2767 
2768  do_streamcopy(ist, ost, pkt);
2769  }
2770 
2771  return !eof_reached;
2772 }
2773 
2774 static void print_sdp(void)
2775 {
2776  char sdp[16384];
2777  int i;
2778  int j;
2779  AVIOContext *sdp_pb;
2780  AVFormatContext **avc;
2781 
2782  for (i = 0; i < nb_output_files; i++) {
2783  if (!output_files[i]->header_written)
2784  return;
2785  }
2786 
2787  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2788  if (!avc)
2789  exit_program(1);
2790  for (i = 0, j = 0; i < nb_output_files; i++) {
2791  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2792  avc[j] = output_files[i]->ctx;
2793  j++;
2794  }
2795  }
2796 
2797  if (!j)
2798  goto fail;
2799 
2800  av_sdp_create(avc, j, sdp, sizeof(sdp));
2801 
2802  if (!sdp_filename) {
2803  printf("SDP:\n%s\n", sdp);
2804  fflush(stdout);
2805  } else {
2806  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2807  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2808  } else {
2809  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2810  avio_closep(&sdp_pb);
2812  }
2813  }
2814 
2815 fail:
2816  av_freep(&avc);
2817 }
2818 
2820 {
2821  int i;
2822  for (i = 0; hwaccels[i].name; i++)
2823  if (hwaccels[i].pix_fmt == pix_fmt)
2824  return &hwaccels[i];
2825  return NULL;
2826 }
2827 
2829 {
2830  InputStream *ist = s->opaque;
2831  const enum AVPixelFormat *p;
2832  int ret;
2833 
2834  for (p = pix_fmts; *p != -1; p++) {
2836  const HWAccel *hwaccel;
2837 
2838  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2839  break;
2840 
2841  hwaccel = get_hwaccel(*p);
2842  if (!hwaccel ||
2843  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2844  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2845  continue;
2846 
2847  ret = hwaccel->init(s);
2848  if (ret < 0) {
2849  if (ist->hwaccel_id == hwaccel->id) {
2851  "%s hwaccel requested for input stream #%d:%d, "
2852  "but cannot be initialized.\n", hwaccel->name,
2853  ist->file_index, ist->st->index);
2854  return AV_PIX_FMT_NONE;
2855  }
2856  continue;
2857  }
2858 
2859  if (ist->hw_frames_ctx) {
2861  if (!s->hw_frames_ctx)
2862  return AV_PIX_FMT_NONE;
2863  }
2864 
2865  ist->active_hwaccel_id = hwaccel->id;
2866  ist->hwaccel_pix_fmt = *p;
2867  break;
2868  }
2869 
2870  return *p;
2871 }
2872 
2874 {
2875  InputStream *ist = s->opaque;
2876 
2877  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2878  return ist->hwaccel_get_buffer(s, frame, flags);
2879 
2880  return avcodec_default_get_buffer2(s, frame, flags);
2881 }
2882 
2883 static int init_input_stream(int ist_index, char *error, int error_len)
2884 {
2885  int ret;
2886  InputStream *ist = input_streams[ist_index];
2887 
2888  if (ist->decoding_needed) {
2889  AVCodec *codec = ist->dec;
2890  if (!codec) {
2891  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2892  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2893  return AVERROR(EINVAL);
2894  }
2895 
2896  ist->dec_ctx->opaque = ist;
2897  ist->dec_ctx->get_format = get_format;
2898  ist->dec_ctx->get_buffer2 = get_buffer;
2899  ist->dec_ctx->thread_safe_callbacks = 1;
2900 
2901  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2902  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2903  (ist->decoding_needed & DECODING_FOR_OST)) {
2904  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2906  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2907  }
2908 
2909  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2910 
2911  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2912  * audio, and video decoders such as cuvid or mediacodec */
2914 
2915  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2916  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2917  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2919  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2920 
2921  ret = hw_device_setup_for_decode(ist);
2922  if (ret < 0) {
2923  snprintf(error, error_len, "Device setup failed for "
2924  "decoder on input stream #%d:%d : %s",
2925  ist->file_index, ist->st->index, av_err2str(ret));
2926  return ret;
2927  }
2928 
2929  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2930  if (ret == AVERROR_EXPERIMENTAL)
2931  abort_codec_experimental(codec, 0);
2932 
2933  snprintf(error, error_len,
2934  "Error while opening decoder for input stream "
2935  "#%d:%d : %s",
2936  ist->file_index, ist->st->index, av_err2str(ret));
2937  return ret;
2938  }
2940  }
2941 
2942  ist->next_pts = AV_NOPTS_VALUE;
2943  ist->next_dts = AV_NOPTS_VALUE;
2944 
2945  return 0;
2946 }
2947 
2949 {
2950  if (ost->source_index >= 0)
2951  return input_streams[ost->source_index];
2952  return NULL;
2953 }
2954 
/* qsort comparator for int64_t values: negative/zero/positive like the
 * usual three-way comparison (same values as FFDIFFSIGN). */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;
    return (lhs > rhs) - (lhs < rhs);
}
2959 
2960 /* open the muxer when all the streams are initialized */
2961 static int check_init_output_file(OutputFile *of, int file_index)
2962 {
2963  int ret, i;
2964 
2965  for (i = 0; i < of->ctx->nb_streams; i++) {
2966  OutputStream *ost = output_streams[of->ost_index + i];
2967  if (!ost->initialized)
2968  return 0;
2969  }
2970 
2971  of->ctx->interrupt_callback = int_cb;
2972 
2973  ret = avformat_write_header(of->ctx, &of->opts);
2974  if (ret < 0) {
2976  "Could not write header for output file #%d "
2977  "(incorrect codec parameters ?): %s\n",
2978  file_index, av_err2str(ret));
2979  return ret;
2980  }
2981  //assert_avoptions(of->opts);
2982  of->header_written = 1;
2983 
2984  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2985 
2986  if (sdp_filename || want_sdp)
2987  print_sdp();
2988 
2989  /* flush the muxing queues */
2990  for (i = 0; i < of->ctx->nb_streams; i++) {
2991  OutputStream *ost = output_streams[of->ost_index + i];
2992 
2993  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2994  if (!av_fifo_size(ost->muxing_queue))
2995  ost->mux_timebase = ost->st->time_base;
2996 
2997  while (av_fifo_size(ost->muxing_queue)) {
2998  AVPacket pkt;
2999  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3000  write_packet(of, &pkt, ost, 1);
3001  }
3002  }
3003 
3004  return 0;
3005 }
3006 
3008 {
3009  AVBSFContext *ctx;
3010  int i, ret;
3011 
3012  if (!ost->nb_bitstream_filters)
3013  return 0;
3014 
3015  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3016  ctx = ost->bsf_ctx[i];
3017 
3018  ret = avcodec_parameters_copy(ctx->par_in,
3019  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3020  if (ret < 0)
3021  return ret;
3022 
3023  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3024 
3025  ret = av_bsf_init(ctx);
3026  if (ret < 0) {
3027  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3028  ost->bsf_ctx[i]->filter->name);
3029  return ret;
3030  }
3031  }
3032 
3033  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3034  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3035  if (ret < 0)
3036  return ret;
3037 
3038  ost->st->time_base = ctx->time_base_out;
3039 
3040  return 0;
3041 }
3042 
3044 {
3045  OutputFile *of = output_files[ost->file_index];
3046  InputStream *ist = get_input_stream(ost);
3047  AVCodecParameters *par_dst = ost->st->codecpar;
3048  AVCodecParameters *par_src = ost->ref_par;
3049  AVRational sar;
3050  int i, ret;
3051  uint32_t codec_tag = par_dst->codec_tag;
3052 
3053  av_assert0(ist && !ost->filter);
3054 
3055  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3056  if (ret >= 0)
3057  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3058  if (ret < 0) {
3060  "Error setting up codec context options.\n");
3061  return ret;
3062  }
3064 
3065  if (!codec_tag) {
3066  unsigned int codec_tag_tmp;
3067  if (!of->ctx->oformat->codec_tag ||
3068  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3069  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3070  codec_tag = par_src->codec_tag;
3071  }
3072 
3073  ret = avcodec_parameters_copy(par_dst, par_src);
3074  if (ret < 0)
3075  return ret;
3076 
3077  par_dst->codec_tag = codec_tag;
3078 
3079  if (!ost->frame_rate.num)
3080  ost->frame_rate = ist->framerate;
3081  ost->st->avg_frame_rate = ost->frame_rate;
3082 
3084  if (ret < 0)
3085  return ret;
3086 
3087  // copy timebase while removing common factors
3088  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3090 
3091  // copy estimated duration as a hint to the muxer
3092  if (ost->st->duration <= 0 && ist->st->duration > 0)
3093  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3094 
3095  // copy disposition
3096  ost->st->disposition = ist->st->disposition;
3097 
3098  if (ist->st->nb_side_data) {
3099  for (i = 0; i < ist->st->nb_side_data; i++) {
3100  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3101  uint8_t *dst_data;
3102 
3103  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3104  if (!dst_data)
3105  return AVERROR(ENOMEM);
3106  memcpy(dst_data, sd_src->data, sd_src->size);
3107  }
3108  }
3109 
3110  if (ost->rotate_overridden) {
3112  sizeof(int32_t) * 9);
3113  if (sd)
3114  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3115  }
3116 
3117  ost->parser = av_parser_init(par_dst->codec_id);
3118  ost->parser_avctx = avcodec_alloc_context3(NULL);
3119  if (!ost->parser_avctx)
3120  return AVERROR(ENOMEM);
3121 
3122  switch (par_dst->codec_type) {
3123  case AVMEDIA_TYPE_AUDIO:
3124  if (audio_volume != 256) {
3125  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3126  exit_program(1);
3127  }
3128  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3129  par_dst->block_align= 0;
3130  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3131  par_dst->block_align= 0;
3132  break;
3133  case AVMEDIA_TYPE_VIDEO:
3134  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3135  sar =
3136  av_mul_q(ost->frame_aspect_ratio,
3137  (AVRational){ par_dst->height, par_dst->width });
3138  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3139  "with stream copy may produce invalid files\n");
3140  }
3141  else if (ist->st->sample_aspect_ratio.num)
3142  sar = ist->st->sample_aspect_ratio;
3143  else
3144  sar = par_src->sample_aspect_ratio;
3145  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3146  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3147  ost->st->r_frame_rate = ist->st->r_frame_rate;
3148  break;
3149  }
3150 
3151  ost->mux_timebase = ist->st->time_base;
3152 
3153  return 0;
3154 }
3155 
/* set_encoder_id(): tag the output stream's metadata with an "encoder"
 * string (full LIBAVCODEC_IDENT, or just "Lavc <name>" when bitexact
 * output was requested) unless the user already supplied one.
 * NOTE(review): the function signature line (source line 3156) and the
 * flags argument of the final av_dict_set() (source line 3194) are
 * missing from this extraction. */
3157 {
3158  AVDictionaryEntry *e;
3159 
3160  uint8_t *encoder_string;
3161  int encoder_string_len;
3162  int format_flags = 0;
3163  int codec_flags = 0;
3164 
     /* a user-provided "encoder" tag takes precedence — leave it alone */
3165  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3166  return;
3167 
     /* evaluate -fflags so a muxer-level bitexact request is honored */
3168  e = av_dict_get(of->opts, "fflags", NULL, 0);
3169  if (e) {
3170  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3171  if (!o)
3172  return;
3173  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3174  }
     /* likewise for the per-encoder -flags option */
3175  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3176  if (e) {
3177  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3178  if (!o)
3179  return;
3180  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3181  }
3182 
     /* +2: one for the separating space, one for the terminating NUL */
3183  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3184  encoder_string = av_mallocz(encoder_string_len);
3185  if (!encoder_string)
3186  exit_program(1);
3187 
     /* emit the full version ident only when neither the muxer nor the
      * encoder was asked for bitexact (reproducible) output */
3188  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3189  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3190  else
3191  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3192  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3193  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3195 }
3196 
/**
 * Parse a comma-separated -force_key_frames specification into a sorted
 * array of timestamps (rescaled to the encoder time base), stored in
 * ost->forced_kf_pts / ost->forced_kf_count. An entry of the form
 * "chapters[+offset]" expands to one keyframe per chapter of the output
 * file. Exits the program on allocation failure.
 *
 * @param kf    mutable spec string (commas are overwritten with NULs)
 * @param ost   output stream receiving the keyframe list
 * @param avctx encoder context supplying the target time base
 */
3197 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3198  AVCodecContext *avctx)
3199 {
3200  char *p;
3201  int n = 1, i, size, index = 0;
3202  int64_t t, *pts;
3203 
     /* number of entries = number of commas + 1 */
3204  for (p = kf; *p; p++)
3205  if (*p == ',')
3206  n++;
3207  size = n;
3208  pts = av_malloc_array(size, sizeof(*pts));
3209  if (!pts) {
3210  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3211  exit_program(1);
3212  }
3213 
3214  p = kf;
3215  for (i = 0; i < n; i++) {
3216  char *next = strchr(p, ',');
3217 
     /* NUL-terminate the current entry in place */
3218  if (next)
3219  *next++ = 0;
3220 
     /* "chapters[+offset]": one keyframe per chapter of the output file */
3221  if (!memcmp(p, "chapters", 8)) {
3222 
3223  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3224  int j;
3225 
     /* grow the array: this entry yields nb_chapters timestamps, not 1.
      * NOTE(review): line 3229 (start of the fatal av_log call) is
      * missing from this extraction. */
3226  if (avf->nb_chapters > INT_MAX - size ||
3227  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3228  sizeof(*pts)))) {
3230  "Could not allocate forced key frames array.\n");
3231  exit_program(1);
3232  }
     /* optional "+offset" suffix shifts every chapter keyframe */
3233  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3234  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3235 
3236  for (j = 0; j < avf->nb_chapters; j++) {
3237  AVChapter *c = avf->chapters[j];
3238  av_assert1(index < size);
3239  pts[index++] = av_rescale_q(c->start, c->time_base,
3240  avctx->time_base) + t;
3241  }
3242 
3243  } else {
3244 
     /* plain timestamp entry */
3245  t = parse_time_or_die("force_key_frames", p, 1);
3246  av_assert1(index < size);
3247  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3248 
3249  }
3250 
3251  p = next;
3252  }
3253 
     /* keep the list sorted so the encoder can consume it linearly */
3254  av_assert0(index == size);
3255  qsort(pts, size, sizeof(*pts), compare_int64);
3256  ost->forced_kf_count = size;
3257  ost->forced_kf_pts = pts;
3258 }
3259 
3260 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3261 {
3262  InputStream *ist = get_input_stream(ost);
3263  AVCodecContext *enc_ctx = ost->enc_ctx;
3264  AVFormatContext *oc;
3265 
3266  if (ost->enc_timebase.num > 0) {
3267  enc_ctx->time_base = ost->enc_timebase;
3268  return;
3269  }
3270 
3271  if (ost->enc_timebase.num < 0) {
3272  if (ist) {
3273  enc_ctx->time_base = ist->st->time_base;
3274  return;
3275  }
3276 
3277  oc = output_files[ost->file_index]->ctx;
3278  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3279  }
3280 
3281  enc_ctx->time_base = default_time_base;
3282 }
3283 
/* init_output_stream_encode(): configure the encoder context of an output
 * stream before opening it — copy dispositions, pick a frame rate, set up
 * per-media-type parameters (sample rate/format, dimensions, SAR, pixel
 * format, forced keyframes) and the encoder/mux time bases.
 * NOTE(review): the signature line (3284), the dec_ctx declaration (3288)
 * and numerous other source lines are missing from this extraction; the
 * code below is incomplete as shown. */
3285 {
3286  InputStream *ist = get_input_stream(ost);
3287  AVCodecContext *enc_ctx = ost->enc_ctx;
3289  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3290  int j, ret;
3291 
3292  set_encoder_id(output_files[ost->file_index], ost);
3293 
3294  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3295  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3296  // which have to be filtered out to prevent leaking them to output files.
3297  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3298 
3299  if (ist) {
     /* inherit disposition and chroma siting from the decoded input */
3300  ost->st->disposition = ist->st->disposition;
3301 
3302  dec_ctx = ist->dec_ctx;
3303 
3304  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3305  } else {
     /* no input stream: check whether this is the only stream of its type
      * in the file. NOTE(review): lines 3312-3314 (the action taken when
      * it is unique) are missing. */
3306  for (j = 0; j < oc->nb_streams; j++) {
3307  AVStream *st = oc->streams[j];
3308  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3309  break;
3310  }
3311  if (j == oc->nb_streams)
3312  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3315  }
3316 
3317  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
     /* frame-rate fallback chain: -r, then input -framerate, then the
      * input stream's r_frame_rate, finally a hard-coded 25 fps.
      * NOTE(review): lines 3319 and 3326 are missing here. */
3318  if (!ost->frame_rate.num)
3320  if (ist && !ost->frame_rate.num)
3321  ost->frame_rate = ist->framerate;
3322  if (ist && !ost->frame_rate.num)
3323  ost->frame_rate = ist->st->r_frame_rate;
3324  if (ist && !ost->frame_rate.num) {
3325  ost->frame_rate = (AVRational){25, 1};
3327  "No information "
3328  "about the input framerate is available. Falling "
3329  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3330  "if you want a different framerate.\n",
3331  ost->file_index, ost->index);
3332  }
3333 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
     /* snap to the nearest rate the encoder supports, unless -force_fps */
3334  if (ost->enc->supported_framerates && !ost->force_fps) {
3335  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3336  ost->frame_rate = ost->enc->supported_framerates[idx];
3337  }
3338  // reduce frame rate for mpeg4 to be within the spec limits
3339  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3340  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3341  ost->frame_rate.num, ost->frame_rate.den, 65535);
3342  }
3343  }
3344 
3345  switch (enc_ctx->codec_type) {
3346  case AVMEDIA_TYPE_AUDIO:
     /* NOTE(review): lines 3347 and 3351-3353 (sample format/rate/layout
      * setup from the filter sink, presumably) are missing here. */
3348  if (dec_ctx)
3349  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3350  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3354 
3355  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3356  break;
3357 
3358  case AVMEDIA_TYPE_VIDEO:
     /* NOTE(review): lines 3359, 3362, 3364 (time-base init and part of
      * the high-frame-rate check) are missing here. */
3360 
3361  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3363  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3365  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3366  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3367  }
     /* rescale any forced keyframe timestamps into the encoder time base */
3368  for (j = 0; j < ost->forced_kf_count; j++)
3369  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3371  enc_ctx->time_base);
3372 
     /* geometry and aspect come from the filter graph's buffersink */
3373  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3374  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3375  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3376  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3377  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3379 
3380  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3381  if (dec_ctx)
3382  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3383  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3384 
3385  enc_ctx->framerate = ost->frame_rate;
3386 
3387  ost->st->avg_frame_rate = ost->frame_rate;
3388 
     /* NOTE(review): line 3393 (the body of this geometry/format-change
      * branch) is missing here. */
3389  if (!dec_ctx ||
3390  enc_ctx->width != dec_ctx->width ||
3391  enc_ctx->height != dec_ctx->height ||
3392  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3394  }
3395 
3396  if (ost->forced_keyframes) {
     /* "expr:<expr>" — parse the expression for per-frame evaluation.
      * NOTE(review): lines 3398-3399, 3401, 3405-3408, 3413 are missing. */
3397  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3400  if (ret < 0) {
3402  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3403  return ret;
3404  }
3409 
3410  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3411  // parse it only for static kf timings
3412  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3414  }
3415  }
3416  break;
3417  case AVMEDIA_TYPE_SUBTITLE:
3418  enc_ctx->time_base = AV_TIME_BASE_Q;
     /* default subtitle canvas to the source video dimensions */
3419  if (!enc_ctx->width) {
3420  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3421  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3422  }
3423  break;
3424  case AVMEDIA_TYPE_DATA:
3425  break;
3426  default:
3427  abort();
3428  break;
3429  }
3430 
3431  ost->mux_timebase = enc_ctx->time_base;
3432 
3433  return 0;
3434 }
3435 
/**
 * Fully initialize one output stream: for encoded streams, configure and
 * open the encoder (including hardware setup and side-data propagation);
 * for streamcopy, copy codec parameters from the input. Afterwards apply
 * any user -disposition override, set up bitstream filters, and try to
 * write the output file header once all its streams are ready.
 *
 * @param error/error_len  buffer receiving a human-readable failure message
 * @return 0 on success, a negative AVERROR code on failure
 * NOTE(review): several source lines (3453, 3457, 3468-3470, 3494, 3496,
 * 3501, 3503) are missing from this extraction.
 */
3436 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3437 {
3438  int ret = 0;
3439 
3440  if (ost->encoding_needed) {
3441  AVCodec *codec = ost->enc;
3442  AVCodecContext *dec = NULL;
3443  InputStream *ist;
3444 
3445  ret = init_output_stream_encode(ost);
3446  if (ret < 0)
3447  return ret;
3448 
3449  if ((ist = get_input_stream(ost)))
3450  dec = ist->dec_ctx;
     /* propagate the decoder's subtitle header (ASS) to the encoder.
      * NOTE(review): lines 3453 (the allocation) and 3457 are missing. */
3451  if (dec && dec->subtitle_header) {
3452  /* ASS code assumes this buffer is null terminated so add extra byte. */
3454  if (!ost->enc_ctx->subtitle_header)
3455  return AVERROR(ENOMEM);
3456  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3458  }
     /* sensible defaults when the user did not specify them */
3459  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3460  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3461  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3462  !codec->defaults &&
3463  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3464  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3465  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3466 
     /* hardware frames context from the filter graph, or device setup.
      * NOTE(review): lines 3468-3470 (the hw_frames_ctx ref) are missing. */
3467  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3471  if (!ost->enc_ctx->hw_frames_ctx)
3472  return AVERROR(ENOMEM);
3473  } else {
3474  ret = hw_device_setup_for_encode(ost);
3475  if (ret < 0) {
3476  snprintf(error, error_len, "Device setup failed for "
3477  "encoder on output stream #%d:%d : %s",
3478  ost->file_index, ost->index, av_err2str(ret));
3479  return ret;
3480  }
3481  }
3482 
3483  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3484  if (ret == AVERROR_EXPERIMENTAL)
3485  abort_codec_experimental(codec, 1);
3486  snprintf(error, error_len,
3487  "Error while opening encoder for output stream #%d:%d - "
3488  "maybe incorrect parameters such as bit_rate, rate, width or height",
3489  ost->file_index, ost->index);
3490  return ret;
3491  }
     /* fixed-frame-size audio encoders need the filter sink told.
      * NOTE(review): lines 3494 and 3496 are missing here. */
3492  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3493  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3495  ost->enc_ctx->frame_size);
3497  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3498  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3499  " It takes bits/s as argument, not kbits/s\n");
3500 
     /* NOTE(review): lines 3501/3503 (the call whose result is checked,
      * and part of the fatal log) are missing here. */
3502  if (ret < 0) {
3504  "Error initializing the output stream codec context.\n");
3505  exit_program(1);
3506  }
3507  /*
3508  * FIXME: ost->st->codec should't be needed here anymore.
3509  */
3510  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3511  if (ret < 0)
3512  return ret;
3513 
     /* copy coded side data produced by the encoder to the stream */
3514  if (ost->enc_ctx->nb_coded_side_data) {
3515  int i;
3516 
3517  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3518  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3519  uint8_t *dst_data;
3520 
3521  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3522  if (!dst_data)
3523  return AVERROR(ENOMEM);
3524  memcpy(dst_data, sd_src->data, sd_src->size);
3525  }
3526  }
3527 
3528  /*
3529  * Add global input side data. For now this is naive, and copies it
3530  * from the input stream's global side data. All side data should
3531  * really be funneled over AVFrame and libavfilter, then added back to
3532  * packet side data, and then potentially using the first packet for
3533  * global side data.
3534  */
3535  if (ist) {
3536  int i;
3537  for (i = 0; i < ist->st->nb_side_data; i++) {
3538  AVPacketSideData *sd = &ist->st->side_data[i];
3539  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3540  if (!dst)
3541  return AVERROR(ENOMEM);
3542  memcpy(dst, sd->data, sd->size);
     /* autorotate consumed the rotation in the filter graph, so reset
      * the copied display matrix to identity */
3543  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3544  av_display_rotation_set((uint32_t *)dst, 0);
3545  }
3546  }
3547 
3548  // copy timebase while removing common factors
3549  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3550  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3551 
3552  // copy estimated duration as a hint to the muxer
3553  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3554  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3555 
3556  ost->st->codec->codec= ost->enc_ctx->codec;
3557  } else if (ost->stream_copy) {
3558  ret = init_output_stream_streamcopy(ost);
3559  if (ret < 0)
3560  return ret;
3561 
3562  /*
3563  * FIXME: will the codec context used by the parser during streamcopy
3564  * This should go away with the new parser API.
3565  */
3566  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3567  if (ret < 0)
3568  return ret;
3569  }
3570 
3571  // parse user provided disposition, and update stream values
3572  if (ost->disposition) {
     /* ad-hoc AVOption table so av_opt_eval_flags can parse the
      * "+default-forced"-style disposition string */
3573  static const AVOption opts[] = {
3574  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3575  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3576  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3577  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3578  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3579  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3580  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3581  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3582  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3583  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3584  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3585  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3586  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3587  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3588  { NULL },
3589  };
3590  static const AVClass class = {
3591  .class_name = "",
3592  .item_name = av_default_item_name,
3593  .option = opts,
3594  .version = LIBAVUTIL_VERSION_INT,
3595  };
3596  const AVClass *pclass = &class;
3597 
3598  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3599  if (ret < 0)
3600  return ret;
3601  }
3602 
3603  /* initialize bitstream filters for the output stream
3604  * needs to be done here, because the codec id for streamcopy is not
3605  * known until now */
3606  ret = init_output_bsfs(ost);
3607  if (ret < 0)
3608  return ret;
3609 
3610  ost->initialized = 1;
3611 
     /* write the header once every stream of this file is initialized */
3612  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3613  if (ret < 0)
3614  return ret;
3615 
3616  return ret;
3617 }
3618 
/* Warn — once per stream index — when a packet arrives on an input stream
 * that appeared after the initial stream discovery.
 * NOTE(review): line 3628 (one argument of the av_log call, likely the
 * media type string) is missing from this extraction. */
3619 static void report_new_stream(int input_index, AVPacket *pkt)
3620 {
3621  InputFile *file = input_files[input_index];
3622  AVStream *st = file->ctx->streams[pkt->stream_index];
3623 
     /* already warned about this (or a later) stream index */
3624  if (pkt->stream_index < file->nb_streams_warn)
3625  return;
3626  av_log(file->ctx, AV_LOG_WARNING,
3627  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3629  input_index, pkt->stream_index,
3630  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3631  file->nb_streams_warn = pkt->stream_index + 1;
3632 }
3633 
/**
 * One-time transcoding setup: resolve filtergraph-fed output streams to a
 * source input stream, start rate emulation clocks, initialize all input
 * streams and non-filter output streams, discard unused programs, write
 * headers for stream-less outputs, and finally print the stream mapping.
 *
 * @return 0 on success, a negative error code otherwise (the collected
 *         error message is printed before returning).
 * NOTE(review): source line 3801 (executed just before the final return)
 * is missing from this extraction.
 */
3634 static int transcode_init(void)
3635 {
3636  int ret = 0, i, j, k;
3637  AVFormatContext *oc;
3638  OutputStream *ost;
3639  InputStream *ist;
3640  char error[1024] = {0};
3641 
     /* give filtergraph outputs a best-effort source_index (the input
      * stream feeding a single-input graph) for mapping/reporting */
3642  for (i = 0; i < nb_filtergraphs; i++) {
3643  FilterGraph *fg = filtergraphs[i];
3644  for (j = 0; j < fg->nb_outputs; j++) {
3645  OutputFilter *ofilter = fg->outputs[j];
3646  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3647  continue;
3648  if (fg->nb_inputs != 1)
3649  continue;
3650  for (k = nb_input_streams-1; k >= 0 ; k--)
3651  if (fg->inputs[0]->ist == input_streams[k])
3652  break;
3653  ofilter->ost->source_index = k;
3654  }
3655  }
3656 
3657  /* init framerate emulation */
3658  for (i = 0; i < nb_input_files; i++) {
3659  InputFile *ifile = input_files[i];
3660  if (ifile->rate_emu)
3661  for (j = 0; j < ifile->nb_streams; j++)
3662  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3663  }
3664 
3665  /* init input streams */
3666  for (i = 0; i < nb_input_streams; i++)
3667  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
     /* on failure, close any encoders before reporting */
3668  for (i = 0; i < nb_output_streams; i++) {
3669  ost = output_streams[i];
3670  avcodec_close(ost->enc_ctx);
3671  }
3672  goto dump_format;
3673  }
3674 
3675  /* open each encoder */
3676  for (i = 0; i < nb_output_streams; i++) {
3677  // skip streams fed from filtergraphs until we have a frame for them
3678  if (output_streams[i]->filter)
3679  continue;
3680 
3681  ret = init_output_stream(output_streams[i], error, sizeof(error));
3682  if (ret < 0)
3683  goto dump_format;
3684  }
3685 
3686  /* discard unused programs */
3687  for (i = 0; i < nb_input_files; i++) {
3688  InputFile *ifile = input_files[i];
3689  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3690  AVProgram *p = ifile->ctx->programs[j];
3691  int discard = AVDISCARD_ALL;
3692 
     /* keep the program if any of its streams is mapped */
3693  for (k = 0; k < p->nb_stream_indexes; k++)
3694  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3695  discard = AVDISCARD_DEFAULT;
3696  break;
3697  }
3698  p->discard = discard;
3699  }
3700  }
3701 
3702  /* write headers for files with no streams */
3703  for (i = 0; i < nb_output_files; i++) {
3704  oc = output_files[i]->ctx;
3705  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3706  ret = check_init_output_file(output_files[i], i);
3707  if (ret < 0)
3708  goto dump_format;
3709  }
3710  }
3711 
3712  dump_format:
3713  /* dump the stream mapping */
3714  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3715  for (i = 0; i < nb_input_streams; i++) {
3716  ist = input_streams[i];
3717 
     /* inputs feeding complex filtergraphs */
3718  for (j = 0; j < ist->nb_filters; j++) {
3719  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3720  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3721  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3722  ist->filters[j]->name);
3723  if (nb_filtergraphs > 1)
3724  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3725  av_log(NULL, AV_LOG_INFO, "\n");
3726  }
3727  }
3728  }
3729 
3730  for (i = 0; i < nb_output_streams; i++) {
3731  ost = output_streams[i];
3732 
3733  if (ost->attachment_filename) {
3734  /* an attached file */
3735  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3736  ost->attachment_filename, ost->file_index, ost->index);
3737  continue;
3738  }
3739 
3740  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3741  /* output from a complex graph */
3742  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3743  if (nb_filtergraphs > 1)
3744  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3745 
3746  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3747  ost->index, ost->enc ? ost->enc->name : "?");
3748  continue;
3749  }
3750 
     /* simple input->output mapping */
3751  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3752  input_streams[ost->source_index]->file_index,
3753  input_streams[ost->source_index]->st->index,
3754  ost->file_index,
3755  ost->index);
3756  if (ost->sync_ist != input_streams[ost->source_index])
3757  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3758  ost->sync_ist->file_index,
3759  ost->sync_ist->st->index);
3760  if (ost->stream_copy)
3761  av_log(NULL, AV_LOG_INFO, " (copy)");
3762  else {
     /* "(codec (decoder) -> codec (encoder))"; "native" marks the
      * default implementation whose name equals the codec name */
3763  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3764  const AVCodec *out_codec = ost->enc;
3765  const char *decoder_name = "?";
3766  const char *in_codec_name = "?";
3767  const char *encoder_name = "?";
3768  const char *out_codec_name = "?";
3769  const AVCodecDescriptor *desc;
3770 
3771  if (in_codec) {
3772  decoder_name = in_codec->name;
3773  desc = avcodec_descriptor_get(in_codec->id);
3774  if (desc)
3775  in_codec_name = desc->name;
3776  if (!strcmp(decoder_name, in_codec_name))
3777  decoder_name = "native";
3778  }
3779 
3780  if (out_codec) {
3781  encoder_name = out_codec->name;
3782  desc = avcodec_descriptor_get(out_codec->id);
3783  if (desc)
3784  out_codec_name = desc->name;
3785  if (!strcmp(encoder_name, out_codec_name))
3786  encoder_name = "native";
3787  }
3788 
3789  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3790  in_codec_name, decoder_name,
3791  out_codec_name, encoder_name);
3792  }
3793  av_log(NULL, AV_LOG_INFO, "\n");
3794  }
3795 
3796  if (ret) {
3797  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3798  return ret;
3799  }
3800 
3802 
3803  return 0;
3804 }
3805 
3806 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3807 static int need_output(void)
3808 {
3809  int i;
3810 
3811  for (i = 0; i < nb_output_streams; i++) {
3812  OutputStream *ost = output_streams[i];
3813  OutputFile *of = output_files[ost->file_index];
3814  AVFormatContext *os = output_files[ost->file_index]->ctx;
3815 
3816  if (ost->finished ||
3817  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3818  continue;
3819  if (ost->frame_number >= ost->max_frames) {
3820  int j;
3821  for (j = 0; j < of->ctx->nb_streams; j++)
3822  close_output_stream(output_streams[of->ost_index + j]);
3823  continue;
3824  }
3825 
3826  return 1;
3827  }
3828 
3829  return 0;
3830 }
3831 
3832 /**
3833  * Select the output stream to process.
3834  *
3835  * @return selected output stream, or NULL if none available
3836  */
/* choose_output(): select the next output stream to process — any stream
 * that is not yet initialized (and whose inputs are not exhausted) takes
 * priority; otherwise the unfinished, available stream with the smallest
 * dts (rescaled to microseconds) is returned, or NULL if none qualifies.
 * NOTE(review): the signature line (source line 3837) is missing from
 * this extraction. */
3838 {
3839  int i;
3840  int64_t opts_min = INT64_MAX;
3841  OutputStream *ost_min = NULL;
3842 
3843  for (i = 0; i < nb_output_streams; i++) {
3844  OutputStream *ost = output_streams[i];
     /* INT64_MIN sorts streams with no dts yet ahead of everything */
3845  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3846  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3847  AV_TIME_BASE_Q);
3848  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3849  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3850 
3851  if (!ost->initialized && !ost->inputs_done)
3852  return ost;
3853 
     /* an "unavailable" stream blocks selection at this dts */
3854  if (!ost->finished && opts < opts_min) {
3855  opts_min = opts;
3856  ost_min = ost->unavailable ? NULL : ost;
3857  }
3858  }
3859  return ost_min;
3860 }
3861 
/* Enable (on != 0) or disable terminal echo on stdin where termios is
 * available; a no-op elsewhere. Failures of tcgetattr() are ignored. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3873 
/* check_keyboard_interaction(): poll the controlling terminal (at most
 * every 100ms, never when running as a daemon) and act on single-key
 * commands: q quit, +/- verbosity, s QP histogram, h packet/hex dump
 * cycling, c/C filter commands, d/D debug flags, ? help.
 * Returns AVERROR_EXIT to request termination, 0 otherwise.
 * NOTE(review): the signature line (3874) and lines 3898, 3935, 3947 are
 * missing from this extraction. */
3875 {
3876  int i, ret, key;
3877  static int64_t last_time;
3878  if (received_nb_signals)
3879  return AVERROR_EXIT;
3880  /* read_key() returns 0 on EOF */
3881  if(cur_time - last_time >= 100000 && !run_as_daemon){
3882  key = read_key();
3883  last_time = cur_time;
3884  }else
3885  key = -1;
3886  if (key == 'q')
3887  return AVERROR_EXIT;
3888  if (key == '+') av_log_set_level(av_log_get_level()+10);
3889  if (key == '-') av_log_set_level(av_log_get_level()-10);
3890  if (key == 's') qp_hist ^= 1;
     /* 'h' cycles: off -> pkt dump -> pkt+hex dump -> off */
3891  if (key == 'h'){
3892  if (do_hex_dump){
3893  do_hex_dump = do_pkt_dump = 0;
3894  } else if(do_pkt_dump){
3895  do_hex_dump = 1;
3896  } else
3897  do_pkt_dump = 1;
3899  }
     /* 'c': send a command to the first matching filter; 'C': to all */
3900  if (key == 'c' || key == 'C'){
3901  char buf[4096], target[64], command[256], arg[256] = {0};
3902  double time;
3903  int k, n = 0;
3904  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3905  i = 0;
3906  set_tty_echo(1);
3907  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3908  if (k > 0)
3909  buf[i++] = k;
3910  buf[i] = 0;
3911  set_tty_echo(0);
3912  fprintf(stderr, "\n");
3913  if (k > 0 &&
3914  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3915  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3916  target, time, command, arg);
3917  for (i = 0; i < nb_filtergraphs; i++) {
3918  FilterGraph *fg = filtergraphs[i];
3919  if (fg->graph) {
     /* time < 0: execute immediately; otherwise queue for later */
3920  if (time < 0) {
3921  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3922  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3923  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3924  } else if (key == 'c') {
3925  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3926  ret = AVERROR_PATCHWELCOME;
3927  } else {
3928  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3929  if (ret < 0)
3930  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3931  }
3932  }
3933  }
3934  } else {
3936  "Parse error, at least 3 arguments were expected, "
3937  "only %d given in string '%s'\n", n, buf);
3938  }
3939  }
     /* 'D' cycles to the next supported debug mode; 'd' prompts for one */
3940  if (key == 'd' || key == 'D'){
3941  int debug=0;
3942  if(key == 'D') {
3943  debug = input_streams[0]->st->codec->debug<<1;
3944  if(!debug) debug = 1;
3945  while(debug & (FF_DEBUG_DCT_COEFF
3946 #if FF_API_DEBUG_MV
3948 #endif
3949  )) //unsupported, would just crash
3950  debug += debug;
3951  }else{
3952  char buf[32];
3953  int k = 0;
3954  i = 0;
3955  set_tty_echo(1);
3956  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3957  if (k > 0)
3958  buf[i++] = k;
3959  buf[i] = 0;
3960  set_tty_echo(0);
3961  fprintf(stderr, "\n");
3962  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3963  fprintf(stderr,"error parsing debug value\n");
3964  }
     /* apply the chosen debug flags to every decoder and encoder */
3965  for(i=0;i<nb_input_streams;i++) {
3966  input_streams[i]->st->codec->debug = debug;
3967  }
3968  for(i=0;i<nb_output_streams;i++) {
3969  OutputStream *ost = output_streams[i];
3970  ost->enc_ctx->debug = debug;
3971  }
3972  if(debug) av_log_set_level(AV_LOG_DEBUG);
3973  fprintf(stderr,"debug=%d\n", debug);
3974  }
3975  if (key == '?'){
3976  fprintf(stderr, "key function\n"
3977  "? show this help\n"
3978  "+ increase verbosity\n"
3979  "- decrease verbosity\n"
3980  "c Send command to first matching filter supporting it\n"
3981  "C Send/Queue command to all matching filters\n"
3982  "D cycle through available debug modes\n"
3983  "h dump packets/hex press to cycle through the 3 states\n"
3984  "q quit\n"
3985  "s Show QP histogram\n"
3986  );
3987  }
3988  return 0;
3989 }
3990 
3991 #if HAVE_PTHREADS
/* Per-input-file demuxer thread: read packets with av_read_frame() and
 * forward them to the main thread through f->in_thread_queue. In
 * non-blocking mode a full queue triggers one blocking retry (with a
 * warning about thread_queue_size). Read or send errors are propagated to
 * the receiving side via the queue's error state.
 * NOTE(review): source line 4014 (start of the warning av_log call) is
 * missing from this extraction. */
3992 static void *input_thread(void *arg)
3993 {
3994  InputFile *f = arg;
3995  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3996  int ret = 0;
3997 
3998  while (1) {
3999  AVPacket pkt;
4000  ret = av_read_frame(f->ctx, &pkt);
4001 
     /* transient demuxer EAGAIN: back off briefly and retry */
4002  if (ret == AVERROR(EAGAIN)) {
4003  av_usleep(10000);
4004  continue;
4005  }
     /* hard read error / EOF: tell the receiver and stop */
4006  if (ret < 0) {
4007  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4008  break;
4009  }
4010  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
     /* queue full in non-blocking mode: fall back to one blocking send */
4011  if (flags && ret == AVERROR(EAGAIN)) {
4012  flags = 0;
4013  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4015  "Thread message queue blocking; consider raising the "
4016  "thread_queue_size option (current value: %d)\n",
4017  f->thread_queue_size);
4018  }
4019  if (ret < 0) {
4020  if (ret != AVERROR_EOF)
4021  av_log(f->ctx, AV_LOG_ERROR,
4022  "Unable to send packet to main thread: %s\n",
4023  av_err2str(ret));
     /* the packet was not consumed by the queue — free it here */
4024  av_packet_unref(&pkt);
4025  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4026  break;
4027  }
4028  }
4029 
4030  return NULL;
4031 }
4032 
/* Tear down all demuxer threads: drain each input file's message queue
 * (unreferencing the pending packets), join the thread and free the queue.
 * NOTE(review): source line 4043 is missing from this extraction —
 * presumably a step performed before draining (e.g. signalling the sender
 * to stop); confirm against the original source. */
4033 static void free_input_threads(void)
4034 {
4035  int i;
4036 
4037  for (i = 0; i < nb_input_files; i++) {
4038  InputFile *f = input_files[i];
4039  AVPacket pkt;
4040 
     /* file never had a thread/queue (single input, or already freed) */
4041  if (!f || !f->in_thread_queue)
4042  continue;
     /* drop all packets still buffered in the queue */
4044  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4045  av_packet_unref(&pkt);
4046 
4047  pthread_join(f->thread, NULL);
4048  f->joined = 1;
4049  av_thread_message_queue_free(&f->in_thread_queue);
4050  }
4051 }
4052 
4053 static int init_input_threads(void)
4054 {
4055  int i, ret;
4056 
4057  if (nb_input_files == 1)
4058  return 0;
4059 
4060  for (i = 0; i < nb_input_files; i++) {
4061  InputFile *f = input_files[i];
4062 
4063  if (f->ctx->pb ? !f->ctx->pb->seekable :
4064  strcmp(f->ctx->iformat->name, "lavfi"))
4065  f->non_blocking = 1;
4066  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4067  f->thread_queue_size, sizeof(AVPacket));
4068  if (ret < 0)
4069  return ret;
4070 
4071  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4072  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4073  av_thread_message_queue_free(&f->in_thread_queue);
4074  return AVERROR(ret);
4075  }
4076  }
4077  return 0;
4078 }
4079 
/* Receive one packet from the input file's demuxer-thread queue, in
 * non-blocking mode when the file was flagged as such.
 * NOTE(review): source line 4084 (the two branches of the ternary — the
 * flag values passed to the recv call) is missing from this extraction. */
4080 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4081 {
4082  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4083  f->non_blocking ?
4085 }
4087 
/* get_input_packet(): fetch the next packet of an input file. Under -re
 * rate emulation it returns AVERROR(EAGAIN) while any stream of the file
 * is ahead of wall-clock time; with multiple inputs and pthreads the
 * packet comes from the per-file demuxer thread, otherwise it is read
 * directly.
 * NOTE(review): the signature line (source line 4088) is missing from
 * this extraction. */
4089 {
4090  if (f->rate_emu) {
4091  int i;
4092  for (i = 0; i < f->nb_streams; i++) {
4093  InputStream *ist = input_streams[f->ist_index + i];
     /* stream dts in microseconds vs. elapsed time since start */
4094  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4095  int64_t now = av_gettime_relative() - ist->start;
4096  if (pts > now)
4097  return AVERROR(EAGAIN);
4098  }
4099  }
4100 
4101 #if HAVE_PTHREADS
4102  if (nb_input_files > 1)
4103  return get_input_packet_mt(f, pkt);
4104 #endif
4105  return av_read_frame(f->ctx, pkt);
4106 }
4107 
4108 static int got_eagain(void)
4109 {
4110  int i;
4111  for (i = 0; i < nb_output_streams; i++)
4112  if (output_streams[i]->unavailable)
4113  return 1;
4114  return 0;
4115 }
4116 
4117 static void reset_eagain(void)
4118 {
4119  int i;
4120  for (i = 0; i < nb_input_files; i++)
4121  input_files[i]->eagain = 0;
4122  for (i = 0; i < nb_output_streams; i++)
4123  output_streams[i]->unavailable = 0;
4124 }
4125 
4126 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4127 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4128  AVRational time_base)
4129 {
4130  int ret;
4131 
4132  if (!*duration) {
4133  *duration = tmp;
4134  return tmp_time_base;
4135  }
4136 
4137  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4138  if (ret < 0) {
4139  *duration = tmp;
4140  return tmp_time_base;
4141  }
4142 
4143  return time_base;
4144 }
4145 
{
    /* NOTE(review): the function signature line (doxygen 4146) was lost in
     * extraction; presumably
     * 'static int seek_to_start(InputFile *ifile, AVFormatContext *is)'
     * -- confirm against the full source.  Rewinds an input file for
     * -stream_loop and folds this iteration's duration into ifile->duration
     * so the next pass can offset its timestamps. */
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    /* seek back to the beginning of the file */
    ret = av_seek_frame(is, -1, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        // flush decoders
        if (ist->decoding_needed) {
            process_input_packet(ist, NULL, 1);
            avcodec_flush_buffers(avctx);
        }

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* last-frame length = decoded sample count, in stream units */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else
                continue;
        } else {
            /* no audio: approximate the last frame as one frame interval,
             * preferring the forced -r rate over the demuxer's average */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
            } else duration = 1;
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* a positive -stream_loop counts down; negative loops forever */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4206 
/*
 * Demux one packet from the input file with the given index, normalize its
 * timestamps, and feed it to the decoder / streamcopy path.
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 *
 * NOTE(review): several physical lines of this function were lost when the
 * listing was extracted (doxygen lines 4272, 4298-4300, 4372/4374, 4381,
 * 4400/4402, 4411); the affected spots are marked below and must be restored
 * from the full source before this text is compiled.
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, i, j;
    int64_t duration;
    int64_t pkt_dts;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    /* demuxer (or its input thread) has no data right now: remember the
     * EAGAIN so transcode_step() can throttle */
    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* -stream_loop: on EOF/error rewind the file and read again */
    if (ret < 0 && ifile->loop) {
        if ((ret = seek_to_start(ifile, is)) < 0)
            return ret;
        ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->filename, ret);
            if (exit_on_error)
                exit_program(1);
        }

        /* real EOF: drain decoders and finish direct (non-lavfi) outputs */
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        /* NOTE(review): the call head this argument belongs to (presumably
         * av_pkt_dump_log2(...), doxygen 4272) was lost in extraction --
         * restore from source. */
        is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
        av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
        exit_program(1);
    }

    if (debug_ts) {
        /* NOTE(review): three argument lines of this av_log() call (doxygen
         * 4298-4300) were lost in extraction -- restore from source. */
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* undo timestamp wraparound for streams with < 64 pts bits */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the accumulated input offset, then per-stream -itsscale */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    /* NOTE(review): the statement initializing pkt_dts and part of this
     * condition (doxygen 4372/4374) were lost in extraction -- restore
     * from source; pkt_dts is read uninitialized as the text stands. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            /* NOTE(review): the av_log(...) call head for this message
             * (doxygen 4381) was lost in extraction -- restore from source. */
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* when looping, shift timestamps by the accumulated duration of the
     * previous iterations and keep the min/max pts bookkeeping current */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    /* NOTE(review): the pkt_dts rescale statement and part of this condition
     * (doxygen 4400/4402) were lost in extraction -- restore from source. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !copy_ts) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                /* NOTE(review): the av_log(...) call head for this message
                 * (doxygen 4411) was lost in extraction -- restore from source. */
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* strict-timestamp formats: drop wildly invalid dts/pts instead
             * of shifting the whole file offset */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        /* NOTE(review): one argument line of this av_log() call (doxygen
         * 4441) was lost in extraction -- restore from source. */
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* keep sub2video streams ticking along with this packet's pts */
    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4457 
4458 /**
4459  * Perform a step of transcoding for the specified filter graph.
4460  *
4461  * @param[in] graph filter graph to consider
4462  * @param[out] best_ist input stream where a frame would allow to continue
4463  * @return 0 for success, <0 for error
4464  */
4465 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4466 {
4467  int i, ret;
4468  int nb_requests, nb_requests_max = 0;
4469  InputFilter *ifilter;
4470  InputStream *ist;
4471 
4472  *best_ist = NULL;
4473  ret = avfilter_graph_request_oldest(graph->graph);
4474  if (ret >= 0)
4475  return reap_filters(0);
4476 
4477  if (ret == AVERROR_EOF) {
4478  ret = reap_filters(1);
4479  for (i = 0; i < graph->nb_outputs; i++)
4480  close_output_stream(graph->outputs[i]->ost);
4481  return ret;
4482  }
4483  if (ret != AVERROR(EAGAIN))
4484  return ret;
4485 
4486  for (i = 0; i < graph->nb_inputs; i++) {
4487  ifilter = graph->inputs[i];
4488  ist = ifilter->ist;
4489  if (input_files[ist->file_index]->eagain ||
4490  input_files[ist->file_index]->eof_reached)
4491  continue;
4492  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4493  if (nb_requests > nb_requests_max) {
4494  nb_requests_max = nb_requests;
4495  *best_ist = ist;
4496  }
4497  }
4498 
4499  if (!*best_ist)
4500  for (i = 0; i < graph->nb_outputs; i++)
4501  graph->outputs[i]->ost->unavailable = 1;
4502 
4503  return 0;
4504 }
4505 
/**
 * Run a single step of transcoding.
 *
 * @return 0 for success, <0 for error
 *
 * NOTE(review): one physical line (doxygen 4529) was lost when this listing
 * was extracted -- the inner 'if (...) {' guarding filtergraph
 * configuration; restore it from the full source before compiling.
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist = NULL;
    int ret;

    /* pick the output stream that is currently furthest behind */
    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    if (ost->filter && !ost->filter->graph->graph) {
        /* NOTE(review): missing inner condition line here (doxygen 4529). */
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        if (!ost->initialized) {
            char error[1024] = {0};
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        int i;
        /* graph not configured yet: feed the first input that has produced
         * nothing so far and whose file is not exhausted */
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* plain streamcopy / direct encode path */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4582 
/*
 * The following code is the main loop of the file converter
 *
 * NOTE(review): a few physical lines were lost when this listing was
 * extracted (doxygen lines 4655, 4695-4696, 4712); the spots are marked
 * below and must be restored from the full source before compiling.
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_PTHREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* main loop: one transcode_step() per iteration until EOF/quit */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            char errbuf[128];
            av_strerror(ret, errbuf, sizeof(errbuf));

            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_PTHREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            /* NOTE(review): the av_log(...) call head for this message
             * (doxygen 4655) was lost in extraction -- restore from source. */
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->filename);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->filename, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    /* NOTE(review): two lines lost in extraction here (doxygen 4695-4696);
     * presumably hardware-device teardown -- restore from source. */

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_PTHREADS
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        /* NOTE(review): the av_log(...) call head for this
                         * message (doxygen 4712) was lost in extraction --
                         * restore from source. */
                            "Error closing logfile, loss of information possible: %s\n",
                            av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4729 
4730 
/* CPU user time consumed by this process, in microseconds.  Falls back to
 * a wall-clock reading on platforms without a process-usage query. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return usage.ru_utime.tv_usec + 1000000LL * usage.ru_utime.tv_sec;
#elif HAVE_GETPROCESSTIMES
    FILETIME t_created, t_exited, t_kernel, t_user;
    HANDLE self = GetCurrentProcess();

    GetProcessTimes(self, &t_created, &t_exited, &t_kernel, &t_user);
    /* FILETIME counts 100ns ticks; divide by 10 for microseconds */
    return (((int64_t)t_user.dwHighDateTime << 32) | t_user.dwLowDateTime) / 10;
#else
    return av_gettime_relative();
#endif
}
4748 
/* Peak memory usage of this process in bytes, or 0 when the platform
 * offers no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes */
    return 1024 * (int64_t)usage.ru_maxrss;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4766 
/* av_log callback that discards every message.
 * NOTE(review): installed elsewhere in this file (not visible in this
 * chunk) to temporarily silence library logging -- confirm call sites. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4770 
/* Program entry point: parse options, open inputs/outputs, run transcode().
 *
 * NOTE(review): several one-line statements were lost when this listing was
 * extracted (doxygen lines 4778, 4782, 4787, 4792, 4794, 4796, 4798,
 * 4837-4838, 4841) -- cleanup/registration calls and the final error-rate
 * check; restore them from the full source before compiling. */
int main(int argc, char **argv)
{
    int i, ret;
    int64_t ti;

    init_dynload();

    /* NOTE(review): missing line here (doxygen 4778). */

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    /* pick up -loglevel/-v before the full option pass */
    parse_loglevel(argc, argv, options);

    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        /* NOTE(review): missing line here (doxygen 4787). */
        argc--;
        argv++;
    }

    /* NOTE(review): the component-registration calls between these
     * preprocessor lines (doxygen 4792/4794/4796/4798) were lost in
     * extraction -- restore from source. */
#if CONFIG_AVDEVICE
#endif
    av_register_all();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//     if (nb_input_files == 0) {
//         av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//         exit_program(1);
//     }

    /* SDP output is only wanted when every output muxer is "rtp" */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
    /* NOTE(review): the av_log argument line and the decode-error-rate check
     * guarding this exit (doxygen 4837-4838) were lost in extraction --
     * restore from source. */
    exit_program(69);

    /* NOTE(review): missing line here (doxygen 4841). */
    return main_return_code;
}
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1556
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:323
int nb_bitstream_filters
Definition: ffmpeg.h:469
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:938
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:119
AVRational enc_timebase
Definition: ffmpeg.h:467
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
Definition: avcodec.h:3026
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:671
int got_output
Definition: ffmpeg.h:348
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1988
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1065
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2102
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:455
const struct AVCodec * codec
Definition: avcodec.h:1770
Definition: ffmpeg.h:432
AVRational framerate
Definition: avcodec.h:3460
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4233
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:828
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:952
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:376
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:161
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:567
void term_init(void)
Definition: ffmpeg.c:373
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5948
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:270
int nb_outputs
Definition: ffmpeg.h:299
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
AVDictionary * swr_opts
Definition: ffmpeg.h:515
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:309
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2419
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3043
void term_exit(void)
Definition: ffmpeg.c:315
int stream_copy
Definition: ffmpeg.h:520
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1239
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4094
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1605
#define atomic_store(object, desired)
Definition: stdatomic.h:85
AVOption.
Definition: opt.h:246
AVRational frame_rate
Definition: ffmpeg.h:484
int64_t * forced_kf_pts
Definition: ffmpeg.h:494
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:295
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3101
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:383
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:510
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:917
static int process_input(int file_index)
Definition: ffmpeg.c:4214
int exit_on_error
Definition: ffmpeg_opt.c:127
int64_t cfr_next_pts
Definition: ffmpeg.h:333
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3436
static atomic_int transcode_init_done
Definition: ffmpeg.c:323
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1826
#define LIBAVUTIL_VERSION_INT
Definition: version.h:86
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1699
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:131
Memory buffer source API.
const char * desc
Definition: nvenc.c:60
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:340
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4228
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:430
int height
Definition: ffmpeg.h:254
AVCodecParserContext * parser
Definition: ffmpeg.h:536
static int64_t cur_time
Definition: ffserver.c:252
int64_t max_pts
Definition: ffmpeg.h:329
int decoding_needed
Definition: ffmpeg.h:307
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:4152
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:232
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5923
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:959
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5914
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1646
#define vsnprintf
Definition: snprintf.h:36
int index
stream index in AVFormatContext
Definition: avformat.h:890
int size
Definition: avcodec.h:1680
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4749
int max_muxing_queue_size
Definition: ffmpeg.h:551
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:132
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2948
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:217
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:661
#define AV_DISPOSITION_DUB
Definition: avformat.h:837
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2172
int eagain
Definition: ffmpeg.h:403
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1150
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1989
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3284
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:640
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:849
int quality
Definition: ffmpeg.h:549
unsigned num_rects
Definition: avcodec.h:4132
AVFrame * filter_frame
Definition: ffmpeg.h:314
static int transcode_init(void)
Definition: ffmpeg.c:3634
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2955
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2597
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2939
int do_benchmark_all
Definition: ffmpeg_opt.c:120
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:851
int last_dropped
Definition: ffmpeg.h:478
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:672
discard all
Definition: avcodec.h:830
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:999
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:379
int64_t input_ts_offset
Definition: ffmpeg.h:409
int do_hex_dump
Definition: ffmpeg_opt.c:121
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3164
int nb_input_streams
Definition: ffmpeg.c:145
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:755
const char * name
Definition: ffmpeg.h:75
intptr_t atomic_int
Definition: stdatomic.h:55
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1002
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3619
Picture data structure.
Definition: avcodec.h:4061
#define src
Definition: vp8dsp.c:254
uint64_t packets_written
Definition: ffmpeg.h:543
AVCodec.
Definition: avcodec.h:3739
#define VSYNC_VFR
Definition: ffmpeg.h:55
int nb_dts_buffer
Definition: ffmpeg.h:395
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:496
This struct describes the properties of an encoded stream.
Definition: avcodec.h:4144
int print_stats
Definition: ffmpeg_opt.c:129
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
float dts_error_threshold
Definition: ffmpeg_opt.c:112
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:568
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:290
uint64_t data_size
Definition: ffmpeg.h:541
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:459
AVBSFContext ** bsf_ctx
Definition: ffmpeg.h:470
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:841
struct FilterGraph * graph
Definition: ffmpeg.h:245
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1898
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2281
Undefined.
Definition: avutil.h:273
AVSubtitleRect ** rects
Definition: avcodec.h:4133
int encoding_needed
Definition: ffmpeg.h:454
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:645
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4767
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:538
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3874
Format I/O context.
Definition: avformat.h:1349
uint64_t samples_decoded
Definition: ffmpeg.h:392
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:244
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2371
unsigned int nb_stream_indexes
Definition: avformat.h:1281
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:77
int64_t cur_dts
Definition: avformat.h:1066
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:4096
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1027
uint64_t frames_decoded
Definition: ffmpeg.h:391
int header_written
Definition: ffmpeg.h:573
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:293
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:505
static uint8_t * subtitle_out
Definition: ffmpeg.c:142
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:196
static int main_return_code
Definition: ffmpeg.c:325
static int64_t start_time
Definition: ffplay.c:327
int copy_initial_nonkeyframes
Definition: ffmpeg.h:530
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:130
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3007
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2531
int64_t * dts_buffer
Definition: ffmpeg.h:394
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:543
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
Opaque data information usually continuous.
Definition: avutil.h:203
AVDictionary * sws_dict
Definition: ffmpeg.h:514
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: avcodec.h:4218
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:206
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:219
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3397
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:678
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5530
int stdin_interaction
Definition: ffmpeg_opt.c:131
FILE * logfile
Definition: ffmpeg.h:506
AVDictionary * opts
Definition: ffmpeg.h:565
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:637
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1697
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1473
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi 
*(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, 
intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:186
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2819
#define FF_API_DEBUG_MV
Definition: version.h:82
static int need_output(void)
Definition: ffmpeg.c:3807
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:395
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:1003
static double psnr(double d)
Definition: ffmpeg.c:1354
int do_benchmark
Definition: ffmpeg_opt.c:119
int audio_sync_method
Definition: ffmpeg_opt.c:115
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
int shortest
Definition: ffmpeg.h:571
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1417
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2354
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:407
static int64_t getutime(void)
Definition: ffmpeg.c:4731
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:113
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
const char * name
Definition: avcodec.h:5964
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:344
int nb_streams
Definition: ffmpeg.h:416
uint8_t * data
Definition: avcodec.h:1679
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
enum AVMediaType type
Definition: ffmpeg.h:247
static void set_tty_echo(int on)
Definition: ffmpeg.c:3862
AVDictionary * resample_opts
Definition: ffmpeg.h:516
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3197
static int flags
Definition: log.c:57
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:267
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4146
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4848
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
int * formats
Definition: ffmpeg.h:284
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:147
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:408
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1423
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:324
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:835
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1359
uint8_t * data
Definition: avcodec.h:1623
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:348
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:505
ptrdiff_t size
Definition: opengl_enc.c:101
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:4097
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:556
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:312
AVBufferRef * av_buffersink_get_hw_frames_ctx(const AVFilterContext *ctx)
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1279
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2931
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:216
int top_field_first
Definition: ffmpeg.h:341
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1477
int nb_output_streams
Definition: ffmpeg.c:150
int file_index
Definition: ffmpeg.h:303
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:184
const OptionDef options[]
Definition: ffserver.c:3948
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2505
unsigned int * stream_index
Definition: avformat.h:1280
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
int wrap_correction_done
Definition: ffmpeg.h:324
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:326
uint64_t channel_layout
Audio only.
Definition: avcodec.h:4254
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:266
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:871
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1368
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:627
int64_t next_dts
Definition: ffmpeg.h:319
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1711
attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
attribute_deprecated int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Definition: avpicture.c:37
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:62
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:555
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:356
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1163
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2504
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5592
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3753
AVRational sample_aspect_ratio
Definition: ffmpeg.h:255
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3800
int rate_emu
Definition: ffmpeg.h:419
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2279
int width
Definition: frame.h:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:2083
int sample_rate
Definition: ffmpeg.h:257
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1404
static void reset_eagain(void)
Definition: ffmpeg.c:4117
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:43
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:381
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:681
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3291
FilterGraph ** filtergraphs
Definition: ffmpeg.c:154
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:464
AVFilterContext * filter
Definition: ffmpeg.h:243
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:378
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:488
#define atomic_load(object)
Definition: stdatomic.h:93
int64_t start
Definition: ffmpeg.h:316
int loop
Definition: ffmpeg.h:405
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4095
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:389
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:464
int video_sync_method
Definition: ffmpeg_opt.c:116
int format
Definition: ffmpeg.h:252
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:135
#define VSYNC_VSCFR
Definition: ffmpeg.h:56
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
char * sdp_filename
Definition: ffmpeg_opt.c:108
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:479
Display matrix.
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int video_delay
Video only.
Definition: avcodec.h:4247
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:713
const char * r
Definition: vf_curves.c:111
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:117
int capabilities
Codec capabilities.
Definition: avcodec.h:3758
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:130
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:263
unsigned int nb_programs
Definition: avformat.h:1506
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:557
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:4148
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:558
AVChapter ** chapters
Definition: avformat.h:1557
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5954
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1625
int av_log_get_level(void)
Get the current log level.
Definition: log.c:386
const char * name
Name of the codec implementation.
Definition: avcodec.h:3746
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:879
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:76
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:622
int eof
Definition: ffmpeg.h:263
int force_fps
Definition: ffmpeg.h:486
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:312
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:970
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1276
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2268
int qp_hist
Definition: ffmpeg_opt.c:130
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:109
float frame_drop_threshold
Definition: ffmpeg_opt.c:117
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1081
int64_t error[4]
Definition: ffmpeg.h:560
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1685
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3121
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2574
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:4131
static int want_sdp
Definition: ffmpeg.c:137
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:4134
OutputFilter * filter
Definition: ffmpeg.h:508
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2122
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:379
AVRational frame_aspect_ratio
Definition: ffmpeg.h:491
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:840
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2226
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1594
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:848
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
AVRational mux_timebase
Definition: ffmpeg.h:466
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1405
AVDictionary * opts
Definition: movenc.c:50
AVCodecContext * parser_avctx
Definition: ffmpeg.h:537
static int nb_frames_drop
Definition: ffmpeg.c:134
A bitmap, pict will be set.
Definition: avcodec.h:4076
int linesize[4]
Definition: avcodec.h:4112
int nb_output_files
Definition: ffmpeg.c:152
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:261
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:236
int channels
number of audio channels, only used for audio.
Definition: frame.h:506
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:485
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
static int transcode(void)
Definition: ffmpeg.c:4586
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:929
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:439
char filename[1024]
input or output filename
Definition: avformat.h:1425
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:528
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3591
uint64_t * channel_layouts
Definition: ffmpeg.h:285
#define VSYNC_AUTO
Definition: ffmpeg.h:52
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:216
int saw_first_ts
Definition: ffmpeg.h:338
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int abort_on_flags
Definition: ffmpeg_opt.c:128
This side data contains quality related information from the encoder.
Definition: avcodec.h:1497
Immediately push the frame to the output.
Definition: buffersrc.h:46
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:268
int width
picture width / height.
Definition: avcodec.h:1948
PVOID HANDLE
char * apad
Definition: ffmpeg.h:517
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3616
int64_t nb_samples
Definition: ffmpeg.h:335
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5960
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:255
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:499
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:175
int64_t duration
Definition: ffmpeg.h:406
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:427
const char * name
Definition: avformat.h:524
int width
Definition: ffmpeg.h:254
int32_t
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:241
#define AV_CODEC_FLAG_PSNR
error[] variables will be set during encoding.
Definition: avcodec.h:908
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:892
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2307
int nb_filtergraphs
Definition: ffmpeg.c:155
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:88
int64_t last_ts
Definition: ffmpeg.h:412
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable as absolute values of their type.
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:324
int do_pkt_dump
Definition: ffmpeg_opt.c:122
int64_t max_frames
Definition: ffmpeg.h:475
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:380
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:307
int audio_channels_mapped
Definition: ffmpeg.h:503
int n
Definition: avisynth_c.h:684
AVDictionary * metadata
Definition: avformat.h:961
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5306
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1907
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored automatically.
Definition: avfilter.h:671
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:112
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:4111
static int got_eagain(void)
Definition: ffmpeg.c:4108
int inputs_done
Definition: ffmpeg.h:527
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:219
static void error(const char *err)
int vstats_version
Definition: ffmpeg_opt.c:136
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:492
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:859
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1294
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:135
AVCodecContext * enc
Definition: muxing.c:55
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:850
int ret
Definition: ffmpeg.h:349
Keep a reference to the frame.
Definition: buffersrc.h:53
int audio_volume
Definition: ffmpeg_opt.c:114
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:889
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1726
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:486
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
InputFilter ** filters
Definition: ffmpeg.h:365
int fix_sub_duration
Definition: ffmpeg.h:346
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:646
#define VSYNC_DROP
Definition: ffmpeg.h:57
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2139
int64_t recording_time
Definition: ffmpeg.h:415
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4861
Definition: ffmpeg.h:74
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2543
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:74
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2961
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:836
AVStream * st
Definition: ffmpeg.h:304
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:173
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3156
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
int frame_size
Definition: mxfenc.c:1896
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:52
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:872
int ost_index
Definition: ffmpeg.h:566
struct InputStream * sync_ist
Definition: ffmpeg.h:458
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1447
enum AVMediaType codec_type
Definition: avcodec.h:1769
double ts_scale
Definition: ffmpeg.h:337
int unavailable
Definition: ffmpeg.h:519
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:481
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2883
enum AVCodecID codec_id
Definition: avcodec.h:1778
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:327
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1589
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:188
float max_error_rate
Definition: ffmpeg_opt.c:133
int sample_rate
samples per second
Definition: avcodec.h:2523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
uint64_t frames_encoded
Definition: ffmpeg.h:545
AVIOContext * pb
I/O context.
Definition: avformat.h:1391
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:554
int ist_index
Definition: ffmpeg.h:404
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:549
static int loop
Definition: ffplay.c:336
int debug
debug
Definition: avcodec.h:3003
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static void print_sdp(void)
Definition: ffmpeg.c:2774
const char * graph_desc
Definition: ffmpeg.h:291
int guess_layout_max
Definition: ffmpeg.h:342
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:413
struct InputStream::@38 prev_sub
main external API structure.
Definition: avcodec.h:1761
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:353
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:466
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:863
int * sample_rates
Definition: ffmpeg.h:286
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1144
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:289
const char * attachment_filename
Definition: ffmpeg.h:529
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1971
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:400
AVRational time_base
Definition: ffmpeg.h:408
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:631
AVCodecContext * enc_ctx
Definition: ffmpeg.h:472
void * buf
Definition: avisynth_c.h:690
AVFrame * decoded_frame
Definition: ffmpeg.h:313
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1877
Perform non-blocking operation.
Definition: threadmessage.h:31
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:261
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4465
int nb_coded_side_data
Definition: avcodec.h:3592
int channels
Definition: ffmpeg.h:258
int * audio_channels_map
Definition: ffmpeg.h:502
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:53
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:374
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:2039
OutputStream ** output_streams
Definition: ffmpeg.c:149
int index
Definition: gxfenc.c:89
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:3015
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:450
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:440
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2297
static int current_time
Definition: ffmpeg.c:139
int64_t sync_opts
Definition: ffmpeg.h:459
char * vstats_filename
Definition: ffmpeg_opt.c:107
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:164
AVCodecContext * dec_ctx
Definition: ffmpeg.h:311
char * disposition
Definition: ffmpeg.h:532
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:121
int filtergraph_is_simple(FilterGraph *fg)
#define mid_pred
Definition: mathops.h:97
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:825
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1100
int av_buffersink_get_w(const AVFilterContext *ctx)
int nb_streams_warn
Definition: ffmpeg.h:418
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2678
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3260
AVDictionary * decoder_opts
Definition: ffmpeg.h:339
int autorotate
Definition: ffmpeg.h:344
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:711
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:627
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:466
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:132
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4302
#define u(width,...)
int64_t ts_offset
Definition: ffmpeg.h:411
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:293
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4511
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:511
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there were errors during the decoding.
Definition: frame.h:497
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1867
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1713
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4127
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:703
AVFrame * filtered_frame
Definition: ffmpeg.h:476
int source_index
Definition: ffmpeg.h:452
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:283
static volatile int received_nb_signals
Definition: ffmpeg.c:322
int copy_prior_start
Definition: ffmpeg.h:531
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1842
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:662
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:366
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2828
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1434
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
uint8_t level
Definition: svq3.c:207
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:498
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:320
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2450
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:105
int forced_kf_count
Definition: ffmpeg.h:495
int64_t start
Definition: avformat.h:1309
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:946
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:518
char * forced_keyframes
Definition: ffmpeg.h:497
int sample_rate
Audio only.
Definition: avcodec.h:4262
uint64_t data_size
Definition: ffmpeg.h:387
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:73
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:322
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1051
struct FilterGraph * graph
Definition: ffmpeg.h:269
uint64_t limit_filesize
Definition: ffmpeg.h:569
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1529
AVIOContext * progress_avio
Definition: ffmpeg.c:140
int main(int argc, char **argv)
Definition: ffmpeg.c:4771
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:478
AVCodecParameters * ref_par
Definition: ffmpeg.h:473
#define VSYNC_CFR
Definition: ffmpeg.h:54
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1073
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:936
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:491
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:950
uint32_t start_display_time
Definition: avcodec.h:4130
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1055
uint64_t samples_encoded
Definition: ffmpeg.h:546
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1308
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3221
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:948
char * key
Definition: dict.h:86
static FILE * vstats_file
Definition: ffmpeg.c:115
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:89
AVFrame * last_frame
Definition: ffmpeg.h:477
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
uint64_t channel_layout
Definition: ffmpeg.h:259
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1712
int copy_ts
Definition: ffmpeg_opt.c:123
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1035
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1361
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4339
AVFormatContext * ctx
Definition: ffmpeg.h:401
int pict_type
Definition: ffmpeg.h:557
AVSubtitle subtitle
Definition: ffmpeg.h:350
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:896
int eof_reached
Definition: ffmpeg.h:402
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
int forced_kf_index
Definition: ffmpeg.h:496
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:489
char * avfilter
Definition: ffmpeg.h:509
uint8_t * name
Definition: ffmpeg.h:246
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:353
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:111
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:727
int channels
number of audio channels
Definition: avcodec.h:2524
int top_field_first
Definition: ffmpeg.h:487
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputFilter ** outputs
Definition: ffmpeg.h:298
InputFile ** input_files
Definition: ffmpeg.c:146
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2873
void av_log_set_flags(int arg)
Definition: log.c:396
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:279
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2150
AVFormatContext * ctx
Definition: ffmpeg.h:564
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:839
void show_usage(void)
Definition: ffmpeg_opt.c:3241
int channels
Audio only.
Definition: avcodec.h:4258
An instance of a filter.
Definition: avfilter.h:338
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:892
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:372
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:513
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1301
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:113
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:5185
int height
Definition: frame.h:259
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:296
#define av_freep(p)
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:382
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:664
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2258
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2581
#define FF_DEBUG_VIS_QP
Definition: avcodec.h:3025
OutputFile ** output_files
Definition: ffmpeg.c:151
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Definition: avformat.h:1252
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
static void flush_encoders(void)
Definition: ffmpeg.c:1838
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:4156
int copy_tb
Definition: ffmpeg_opt.c:125
int64_t min_pts
Definition: ffmpeg.h:328
int initialized
Definition: ffmpeg.h:525
static volatile int received_sigterm
Definition: ffmpeg.c:321
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:305
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4088
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int stream_index
Definition: avcodec.h:1681
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:926
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:371
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:4114
int64_t first_pts
Definition: ffmpeg.h:462
int nb_inputs
Definition: ffmpeg.h:297
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:952
#define DECODING_FOR_OST
Definition: ffmpeg.h:308
int index
Definition: ffmpeg.h:451
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1108
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
OSTFinished
Definition: ffmpeg.h:444
This structure stores compressed data.
Definition: avcodec.h:1656
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:390
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL.
Definition: aviobuf.c:1137
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5942
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:355
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:249
int debug_ts
Definition: ffmpeg_opt.c:126
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3837
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:267
static void sigterm_handler(int sig)
Definition: ffmpeg.c:328
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1672
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:122
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1818
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1507
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:838
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:144
static unsigned dup_warning
Definition: ffmpeg.c:133
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:68
Definition: ffmpeg.h:436
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:806
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3396
static uint8_t tmp[11]
Definition: aes_ctr.c:26