FFmpeg
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
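/* These names are the variables exposed to the -force_key_frames:expr option.
 * A minimal standalone sketch (illustration only, not part of ffmpeg.c; it
 * relies on the libavutil/eval.h API) of how such an expression can be parsed
 * and evaluated; the values array must follow the same order as the names above. */
static int forced_kf_expr_example(const char *expr_str,
                                  double n, double n_forced,
                                  double prev_forced_n, double prev_forced_t,
                                  double t)
{
    AVExpr *expr = NULL;
    double values[5] = { n, n_forced, prev_forced_n, prev_forced_t, t };
    int force;

    if (av_expr_parse(&expr, expr_str, forced_keyframes_const_names,
                      NULL, NULL, NULL, NULL, 0, NULL) < 0)
        return 0;                          /* unparsable expression: never force */
    force = av_expr_eval(expr, values, NULL) != 0;
    av_expr_free(expr);
    return force;
}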
122 
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
127 } BenchmarkTimeStamps;
128
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 static unsigned nb_output_dumped = 0;
140 
141 static int want_sdp = 1;
142 
145 
146 static uint8_t *subtitle_out;
147 
152 
157 
160 
161 #if HAVE_TERMIOS_H
162 
163 /* init terminal so that we can grab keys */
164 static struct termios oldtty;
165 static int restore_tty;
166 #endif
167 
168 #if HAVE_THREADS
169 static void free_input_threads(void);
170 #endif
171 
172 /* sub2video hack:
173  Convert subtitles to video with alpha to insert them in filter graphs.
174  This is a temporary solution until libavfilter gets real subtitles support.
175  */
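/* Illustrative usage (not part of this file): because sub2video exposes bitmap
   subtitles as a video stream with alpha, they can be burnt in with an ordinary
   overlay filter, e.g.
       ffmpeg -i input.mkv -filter_complex "[0:v][0:s]overlay" -map 0:a out.mkv */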
176 
177 static int sub2video_get_blank_frame(InputStream *ist)
178 {
179  int ret;
180  AVFrame *frame = ist->sub2video.frame;
181 
183  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
184  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
186  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
187  return ret;
188  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
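 /* An all-zero RGB32 canvas has alpha = 0 everywhere, i.e. it is fully
  transparent, so overlaying this frame changes nothing until subtitle
  rectangles are painted into it. */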
189  return 0;
190 }
191 
192 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
193  AVSubtitleRect *r)
194 {
195  uint32_t *pal, *dst2;
196  uint8_t *src, *src2;
197  int x, y;
198 
199  if (r->type != SUBTITLE_BITMAP) {
200  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
201  return;
202  }
203  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
204  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
205  r->x, r->y, r->w, r->h, w, h
206  );
207  return;
208  }
209 
210  dst += r->y * dst_linesize + r->x * 4;
211  src = r->data[0];
212  pal = (uint32_t *)r->data[1];
213  for (y = 0; y < r->h; y++) {
214  dst2 = (uint32_t *)dst;
215  src2 = src;
216  for (x = 0; x < r->w; x++)
217  *(dst2++) = pal[*(src2++)];
218  dst += dst_linesize;
219  src += r->linesize[0];
220  }
221 }
222 
223 static void sub2video_push_ref(InputStream *ist, int64_t pts)
224 {
225  AVFrame *frame = ist->sub2video.frame;
226  int i;
227  int ret;
228 
229  av_assert1(frame->data[0]);
230  ist->sub2video.last_pts = frame->pts = pts;
231  for (i = 0; i < ist->nb_filters; i++) {
232  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
235  if (ret != AVERROR_EOF && ret < 0)
236  av_log(NULL, AV_LOG_WARNING, "Error while adding the frame to the buffer source (%s).\n",
237  av_err2str(ret));
238  }
239 }
240 
241 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
242 {
243  AVFrame *frame = ist->sub2video.frame;
244  int8_t *dst;
245  int dst_linesize;
246  int num_rects, i;
247  int64_t pts, end_pts;
248 
249  if (!frame)
250  return;
251  if (sub) {
252  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
253  AV_TIME_BASE_Q, ist->st->time_base);
254  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
255  AV_TIME_BASE_Q, ist->st->time_base);
256  num_rects = sub->num_rects;
257  } else {
258  /* If we are initializing the system, utilize current heartbeat
259  PTS as the start time, and show until the following subpicture
260  is received. Otherwise, utilize the previous subpicture's end time
261  as the fall-back value. */
262  pts = ist->sub2video.initialize ?
263  heartbeat_pts : ist->sub2video.end_pts;
264  end_pts = INT64_MAX;
265  num_rects = 0;
266  }
267  if (sub2video_get_blank_frame(ist) < 0) {
268  av_log(ist->dec_ctx, AV_LOG_ERROR,
269  "Unable to get a blank canvas.\n");
270  return;
271  }
272  dst = frame->data [0];
273  dst_linesize = frame->linesize[0];
274  for (i = 0; i < num_rects; i++)
275  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
277  ist->sub2video.end_pts = end_pts;
278  ist->sub2video.initialize = 0;
279 }
280 
281 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
282 {
283  InputFile *infile = input_files[ist->file_index];
284  int i, j, nb_reqs;
285  int64_t pts2;
286 
287  /* When a frame is read from a file, examine all sub2video streams in
288  the same file and send the sub2video frame again. Otherwise, decoded
289  video frames could be accumulating in the filter graph while a filter
290  (possibly overlay) is desperately waiting for a subtitle frame. */
291  for (i = 0; i < infile->nb_streams; i++) {
292  InputStream *ist2 = input_streams[infile->ist_index + i];
293  if (!ist2->sub2video.frame)
294  continue;
295  /* subtitles seem to be usually muxed ahead of other streams;
296  if not, subtracting a larger time here is necessary */
297  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
298  /* do not send the heartbeat frame if the subtitle is already ahead */
299  if (pts2 <= ist2->sub2video.last_pts)
300  continue;
301  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
302  /* if we have hit the end of the current displayed subpicture,
303  or if we need to initialize the system, update the
304  overlaid subpicture and its start/end times */
305  sub2video_update(ist2, pts2 + 1, NULL);
306  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
307  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
308  if (nb_reqs)
309  sub2video_push_ref(ist2, pts2);
310  }
311 }
312 
313 static void sub2video_flush(InputStream *ist)
314 {
315  int i;
316  int ret;
317 
318  if (ist->sub2video.end_pts < INT64_MAX)
319  sub2video_update(ist, INT64_MAX, NULL);
320  for (i = 0; i < ist->nb_filters; i++) {
321  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
322  if (ret != AVERROR_EOF && ret < 0)
323  av_log(NULL, AV_LOG_WARNING, "Error while flushing the buffer source.\n");
324  }
325 }
326 
327 /* end of sub2video hack */
328 
329 static void term_exit_sigsafe(void)
330 {
331 #if HAVE_TERMIOS_H
332  if(restore_tty)
333  tcsetattr (0, TCSANOW, &oldtty);
334 #endif
335 }
336 
337 void term_exit(void)
338 {
339  av_log(NULL, AV_LOG_QUIET, "%s", "");
341 }
342 
343 static volatile int received_sigterm = 0;
344 static volatile int received_nb_signals = 0;
346 static volatile int ffmpeg_exited = 0;
347 static int main_return_code = 0;
349 
350 static void
351 sigterm_handler(int sig)
352 {
353  int ret;
354  received_sigterm = sig;
357  if(received_nb_signals > 3) {
358  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359  strlen("Received > 3 system signals, hard exiting\n"));
360  if (ret < 0) { /* Do nothing */ };
361  exit(123);
362  }
363 }
364 
365 #if HAVE_SETCONSOLECTRLHANDLER
366 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
367 {
368  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
369 
370  switch (fdwCtrlType)
371  {
372  case CTRL_C_EVENT:
373  case CTRL_BREAK_EVENT:
374  sigterm_handler(SIGINT);
375  return TRUE;
376 
377  case CTRL_CLOSE_EVENT:
378  case CTRL_LOGOFF_EVENT:
379  case CTRL_SHUTDOWN_EVENT:
380  sigterm_handler(SIGTERM);
381  /* Basically, with these 3 events, when we return from this method the
382  process is hard terminated, so stall as long as we need to,
383  to try and let the main thread(s) clean up and gracefully terminate
384  (we have at most 5 seconds, but should be done far before that). */
385  while (!ffmpeg_exited) {
386  Sleep(0);
387  }
388  return TRUE;
389 
390  default:
391  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
392  return FALSE;
393  }
394 }
395 #endif
396 
397 #ifdef __linux__
398 #define SIGNAL(sig, func) \
399  do { \
400  action.sa_handler = func; \
401  sigaction(sig, &action, NULL); \
402  } while (0)
403 #else
404 #define SIGNAL(sig, func) \
405  signal(sig, func)
406 #endif
407 
408 void term_init(void)
409 {
410 #if defined __linux__
411  struct sigaction action = {0};
412  action.sa_handler = sigterm_handler;
413 
414  /* block other interrupts while processing this one */
415  sigfillset(&action.sa_mask);
416 
417  /* restart interruptible functions (i.e. don't fail with EINTR) */
418  action.sa_flags = SA_RESTART;
419 #endif
420 
421 #if HAVE_TERMIOS_H
423  struct termios tty;
424  if (tcgetattr (0, &tty) == 0) {
425  oldtty = tty;
426  restore_tty = 1;
427 
428  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
429  |INLCR|IGNCR|ICRNL|IXON);
430  tty.c_oflag |= OPOST;
431  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
432  tty.c_cflag &= ~(CSIZE|PARENB);
433  tty.c_cflag |= CS8;
434  tty.c_cc[VMIN] = 1;
435  tty.c_cc[VTIME] = 0;
436 
437  tcsetattr (0, TCSANOW, &tty);
438  }
439  SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
440  }
441 #endif
442 
443  SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
444  SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
445 #ifdef SIGXCPU
446  SIGNAL(SIGXCPU, sigterm_handler);
447 #endif
448 #ifdef SIGPIPE
449  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
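 /* Ignoring SIGPIPE makes a closed output pipe surface as a write error
  (EPIPE) that can be reported, instead of silently killing the process. */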
450 #endif
451 #if HAVE_SETCONSOLECTRLHANDLER
452  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
453 #endif
454 }
455 
456 /* read a key without blocking */
457 static int read_key(void)
458 {
459  unsigned char ch;
460 #if HAVE_TERMIOS_H
461  int n = 1;
462  struct timeval tv;
463  fd_set rfds;
464 
465  FD_ZERO(&rfds);
466  FD_SET(0, &rfds);
467  tv.tv_sec = 0;
468  tv.tv_usec = 0;
469  n = select(1, &rfds, NULL, NULL, &tv);
470  if (n > 0) {
471  n = read(0, &ch, 1);
472  if (n == 1)
473  return ch;
474 
475  return n;
476  }
477 #elif HAVE_KBHIT
478 # if HAVE_PEEKNAMEDPIPE
479  static int is_pipe;
480  static HANDLE input_handle;
481  DWORD dw, nchars;
482  if(!input_handle){
483  input_handle = GetStdHandle(STD_INPUT_HANDLE);
484  is_pipe = !GetConsoleMode(input_handle, &dw);
485  }
486 
487  if (is_pipe) {
488  /* When running under a GUI, you will end up here. */
489  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
490  // input pipe may have been closed by the program that ran ffmpeg
491  return -1;
492  }
493  //Read it
494  if(nchars != 0) {
495  read(0, &ch, 1);
496  return ch;
497  }else{
498  return -1;
499  }
500  }
501 # endif
502  if(kbhit())
503  return(getch());
504 #endif
505  return -1;
506 }
507 
508 static int decode_interrupt_cb(void *ctx)
509 {
511 }
512 
514 
515 static void ffmpeg_cleanup(int ret)
516 {
517  int i, j;
518 
519  if (do_benchmark) {
520  int maxrss = getmaxrss() / 1024;
521  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
522  }
523 
524  for (i = 0; i < nb_filtergraphs; i++) {
525  FilterGraph *fg = filtergraphs[i];
527  for (j = 0; j < fg->nb_inputs; j++) {
528  InputFilter *ifilter = fg->inputs[j];
529  struct InputStream *ist = ifilter->ist;
530 
531  while (av_fifo_size(ifilter->frame_queue)) {
532  AVFrame *frame;
534  sizeof(frame), NULL);
536  }
539  if (ist->sub2video.sub_queue) {
540  while (av_fifo_size(ist->sub2video.sub_queue)) {
541  AVSubtitle sub;
542  av_fifo_generic_read(ist->sub2video.sub_queue,
543  &sub, sizeof(sub), NULL);
545  }
546  av_fifo_freep(&ist->sub2video.sub_queue);
547  }
549  av_freep(&ifilter->name);
550  av_freep(&fg->inputs[j]);
551  }
552  av_freep(&fg->inputs);
553  for (j = 0; j < fg->nb_outputs; j++) {
554  OutputFilter *ofilter = fg->outputs[j];
555 
557  av_freep(&ofilter->name);
558  av_freep(&fg->outputs[j]);
559  }
560  av_freep(&fg->outputs);
561  av_freep(&fg->graph_desc);
562 
564  }
566 
568 
569  /* close files */
570  for (i = 0; i < nb_output_files; i++) {
571  OutputFile *of = output_files[i];
573  if (!of)
574  continue;
575  s = of->ctx;
576  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
577  avio_closep(&s->pb);
579  av_dict_free(&of->opts);
580 
582  }
583  for (i = 0; i < nb_output_streams; i++) {
585 
586  if (!ost)
587  continue;
588 
590 
595 
598  av_freep(&ost->avfilter);
600 
603 
606 
609 
610  if (ost->muxing_queue) {
611  while (av_fifo_size(ost->muxing_queue)) {
612  AVPacket *pkt;
615  }
617  }
618 
620  }
621 #if HAVE_THREADS
622  free_input_threads();
623 #endif
624  for (i = 0; i < nb_input_files; i++) {
628  }
629  for (i = 0; i < nb_input_streams; i++) {
631 
632  av_frame_free(&ist->decoded_frame);
633  av_packet_free(&ist->pkt);
634  av_dict_free(&ist->decoder_opts);
635  avsubtitle_free(&ist->prev_sub.subtitle);
636  av_frame_free(&ist->sub2video.frame);
637  av_freep(&ist->filters);
638  av_freep(&ist->hwaccel_device);
639  av_freep(&ist->dts_buffer);
640 
641  avcodec_free_context(&ist->dec_ctx);
642 
644  }
645 
646  if (vstats_file) {
647  if (fclose(vstats_file))
649  "Error closing vstats file, loss of information possible: %s\n",
650  av_err2str(AVERROR(errno)));
651  }
654 
659 
660  uninit_opts();
661 
663 
664  if (received_sigterm) {
665  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
666  (int) received_sigterm);
667  } else if (ret && atomic_load(&transcode_init_done)) {
668  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
669  }
670  term_exit();
671  ffmpeg_exited = 1;
672 }
673 
674 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
675 {
676  const AVDictionaryEntry *t = NULL;
677 
678  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
680  }
681 }
682 
683 static void assert_avoptions(AVDictionary *m)
684 {
685  const AVDictionaryEntry *t;
686  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
687  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
688  exit_program(1);
689  }
690 }
691 
692 static void abort_codec_experimental(const AVCodec *c, int encoder)
693 {
694  exit_program(1);
695 }
696 
697 static void update_benchmark(const char *fmt, ...)
698 {
699  if (do_benchmark_all) {
701  va_list va;
702  char buf[1024];
703 
704  if (fmt) {
705  va_start(va, fmt);
706  vsnprintf(buf, sizeof(buf), fmt, va);
707  va_end(va);
709  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
712  t.real_usec - current_time.real_usec, buf);
713  }
714  current_time = t;
715  }
716 }
717 
718 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
719 {
720  int i;
721  for (i = 0; i < nb_output_streams; i++) {
722  OutputStream *ost2 = output_streams[i];
723  ost2->finished |= ost == ost2 ? this_stream : others;
724  }
725 }
726 
727 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
728 {
729  AVFormatContext *s = of->ctx;
730  AVStream *st = ost->st;
731  int ret;
732 
733  /*
734  * Audio encoders may split the packets -- #frames in != #packets out.
735  * But there is no reordering, so we can limit the number of output packets
736  * by simply dropping them here.
737  * Counting encoded video frames needs to be done separately because of
738  * reordering, see do_video_out().
739  * Do not count the packet when unqueued because it has been counted when queued.
740  */
741  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
742  if (ost->frame_number >= ost->max_frames) {
744  return;
745  }
746  ost->frame_number++;
747  }
748 
749  if (!of->header_written) {
750  AVPacket *tmp_pkt;
751  /* the muxer is not initialized yet, buffer the packet */
752  if (!av_fifo_space(ost->muxing_queue)) {
753  size_t cur_size = av_fifo_size(ost->muxing_queue);
754  unsigned int are_we_over_size =
756  size_t limit = are_we_over_size ? ost->max_muxing_queue_size : INT_MAX;
757  size_t new_size = FFMIN(2 * cur_size, limit);
758 
759  if (new_size <= cur_size) {
761  "Too many packets buffered for output stream %d:%d.\n",
762  ost->file_index, ost->st->index);
763  exit_program(1);
764  }
765  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
766  if (ret < 0)
767  exit_program(1);
768  }
770  if (ret < 0)
771  exit_program(1);
772  tmp_pkt = av_packet_alloc();
773  if (!tmp_pkt)
774  exit_program(1);
775  av_packet_move_ref(tmp_pkt, pkt);
776  ost->muxing_queue_data_size += tmp_pkt->size;
777  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
778  return;
779  }
780 
783  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
784 
786  int i;
788  NULL);
789  ost->quality = sd ? AV_RL32(sd) : -1;
790  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
791 
792  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
793  if (sd && i < sd[5])
794  ost->error[i] = AV_RL64(sd + 8 + 8*i);
795  else
796  ost->error[i] = -1;
797  }
798 
799  if (ost->frame_rate.num && ost->is_cfr) {
800  if (pkt->duration > 0)
801  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
803  ost->mux_timebase);
804  }
805  }
806 
808 
809  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
810  if (pkt->dts != AV_NOPTS_VALUE &&
811  pkt->pts != AV_NOPTS_VALUE &&
812  pkt->dts > pkt->pts) {
813  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
814  pkt->dts, pkt->pts,
815  ost->file_index, ost->st->index);
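 /* sum minus min minus max leaves the median of {pts, dts, last_mux_dts + 1},
  so both timestamps are replaced by that median value. */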
816  pkt->pts =
817  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
818  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
819  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
820  }
822  pkt->dts != AV_NOPTS_VALUE &&
825  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
826  if (pkt->dts < max) {
827  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
828  if (exit_on_error)
829  loglevel = AV_LOG_ERROR;
830  av_log(s, loglevel, "Non-monotonous DTS in output stream "
831  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
833  if (exit_on_error) {
834  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
835  exit_program(1);
836  }
837  av_log(s, loglevel, "changing to %"PRId64". This may result "
838  "in incorrect timestamps in the output file.\n",
839  max);
840  if (pkt->pts >= pkt->dts)
841  pkt->pts = FFMAX(pkt->pts, max);
842  pkt->dts = max;
843  }
844  }
845  }
846  ost->last_mux_dts = pkt->dts;
847 
848  ost->data_size += pkt->size;
849  ost->packets_written++;
850 
852 
853  if (debug_ts) {
854  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
855  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
859  pkt->size
860  );
861  }
862 
864  if (ret < 0) {
865  print_error("av_interleaved_write_frame()", ret);
866  main_return_code = 1;
868  }
869 }
870 
872 {
875 
877  if (of->shortest) {
878  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
879  of->recording_time = FFMIN(of->recording_time, end);
880  }
881 }
882 
883 /*
884  * Send a single packet to the output, applying any bitstream filters
885  * associated with the output stream. This may result in any number
886  * of packets actually being written, depending on what bitstream
887  * filters are applied. The supplied packet is consumed and will be
888  * blank (as if newly-allocated) when this function returns.
889  *
890  * If eof is set, instead indicate EOF to all bitstream filters and
891  * therefore flush any delayed packets to the output. A blank packet
892  * must be supplied in this case.
893  */
894 static void output_packet(OutputFile *of, AVPacket *pkt,
895  OutputStream *ost, int eof)
896 {
897  int ret = 0;
898 
899  /* apply the output bitstream filters */
900  if (ost->bsf_ctx) {
901  ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
902  if (ret < 0)
903  goto finish;
904  while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
905  write_packet(of, pkt, ost, 0);
906  if (ret == AVERROR(EAGAIN))
907  ret = 0;
908  } else if (!eof)
909  write_packet(of, pkt, ost, 0);
910 
911 finish:
912  if (ret < 0 && ret != AVERROR_EOF) {
913  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
914  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
915  if(exit_on_error)
916  exit_program(1);
917  }
918 }
919 
920 static int check_recording_time(OutputStream *ost)
921 {
922  OutputFile *of = output_files[ost->file_index];
923
924  if (of->recording_time != INT64_MAX &&
926  AV_TIME_BASE_Q) >= 0) {
928  return 0;
929  }
930  return 1;
931 }
932 
933 static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
934  AVFrame *frame)
935 {
936  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
937  AVCodecContext *enc = ost->enc_ctx;
938  if (!frame || frame->pts == AV_NOPTS_VALUE ||
939  !enc || !ost->filter || !ost->filter->graph->graph)
940  goto early_exit;
941 
942  {
944 
945  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
947  AVRational tb = enc->time_base;
948  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
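 /* Temporarily scaling up tb.den (by at most 2^16, keeping it below 2^30)
  lets the av_rescale_q() below carry extra fractional precision; float_pts
  is divided back down by the same factor afterwards. */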
949 
950  tb.den <<= extra_bits;
951  float_pts =
952  av_rescale_q(frame->pts, filter_tb, tb) -
954  float_pts /= 1 << extra_bits;
955  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed if the fps code is changed to work with integers
956  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
957 
958  frame->pts =
959  av_rescale_q(frame->pts, filter_tb, enc->time_base) -
961  }
962 
963 early_exit:
964 
965  if (debug_ts) {
966  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
967  frame ? av_ts2str(frame->pts) : "NULL",
968  frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
969  float_pts,
970  enc ? enc->time_base.num : -1,
971  enc ? enc->time_base.den : -1);
972  }
973 
974  return float_pts;
975 }
976 
977 static int init_output_stream(OutputStream *ost, AVFrame *frame,
978  char *error, int error_len);
979 
980 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
981  unsigned int fatal)
982 {
983  int ret = AVERROR_BUG;
984  char error[1024] = {0};
985 
986  if (ost->initialized)
987  return 0;
988 
989  ret = init_output_stream(ost, frame, error, sizeof(error));
990  if (ret < 0) {
991  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
993 
994  if (fatal)
995  exit_program(1);
996  }
997 
998  return ret;
999 }
1000 
1001 static void do_audio_out(OutputFile *of, OutputStream *ost,
1002  AVFrame *frame)
1003 {
1004  AVCodecContext *enc = ost->enc_ctx;
1005  AVPacket *pkt = ost->pkt;
1006  int ret;
1007 
1009 
1010  if (!check_recording_time(ost))
1011  return;
1012 
1013  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1014  frame->pts = ost->sync_opts;
1015  ost->sync_opts = frame->pts + frame->nb_samples;
1016  ost->samples_encoded += frame->nb_samples;
1017  ost->frames_encoded++;
1018 
1020  if (debug_ts) {
1021  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1022  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1023  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1024  enc->time_base.num, enc->time_base.den);
1025  }
1026 
1027  ret = avcodec_send_frame(enc, frame);
1028  if (ret < 0)
1029  goto error;
1030 
1031  while (1) {
1032  ret = avcodec_receive_packet(enc, pkt);
1033  if (ret == AVERROR(EAGAIN))
1034  break;
1035  if (ret < 0)
1036  goto error;
1037 
1038  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1039 
1041 
1042  if (debug_ts) {
1043  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1044  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1047  }
1048 
1049  output_packet(of, pkt, ost, 0);
1050  }
1051 
1052  return;
1053 error:
1054  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1055  exit_program(1);
1056 }
1057 
1058 static void do_subtitle_out(OutputFile *of,
1059  OutputStream *ost,
1060  AVSubtitle *sub)
1061 {
1062  int subtitle_out_max_size = 1024 * 1024;
1063  int subtitle_out_size, nb, i;
1064  AVCodecContext *enc;
1065  AVPacket *pkt = ost->pkt;
1066  int64_t pts;
1067 
1068  if (sub->pts == AV_NOPTS_VALUE) {
1069  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1070  if (exit_on_error)
1071  exit_program(1);
1072  return;
1073  }
1074 
1075  enc = ost->enc_ctx;
1076 
1077  if (!subtitle_out) {
1078  subtitle_out = av_malloc(subtitle_out_max_size);
1079  if (!subtitle_out) {
1080  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1081  exit_program(1);
1082  }
1083  }
1084 
1085  /* Note: DVB subtitles need one packet to draw them and another
1086  packet to clear them */
1087  /* XXX: signal it in the codec context ? */
1088  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1089  nb = 2;
1090  else
1091  nb = 1;
1092 
1093  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1094  pts = sub->pts;
1097  for (i = 0; i < nb; i++) {
1098  unsigned save_num_rects = sub->num_rects;
1099 
1101  if (!check_recording_time(ost))
1102  return;
1103 
1104  sub->pts = pts;
1105  // start_display_time is required to be 0
1106  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1107  sub->end_display_time -= sub->start_display_time;
1108  sub->start_display_time = 0;
1109  if (i == 1)
1110  sub->num_rects = 0;
1111 
1112  ost->frames_encoded++;
1113 
1114  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1115  subtitle_out_max_size, sub);
1116  if (i == 1)
1117  sub->num_rects = save_num_rects;
1118  if (subtitle_out_size < 0) {
1119  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1120  exit_program(1);
1121  }
1122 
1124  pkt->data = subtitle_out;
1125  pkt->size = subtitle_out_size;
1127  pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1128  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1129  /* XXX: the pts correction is handled here. Maybe handling
1130  it in the codec would be better */
1131  if (i == 0)
1132  pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1133  else
1134  pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1135  }
1136  pkt->dts = pkt->pts;
1137  output_packet(of, pkt, ost, 0);
1138  }
1139 }
1140 
1141 /* May modify/reset next_picture */
1142 static void do_video_out(OutputFile *of,
1143  OutputStream *ost,
1144  AVFrame *next_picture)
1145 {
1146  int ret;
1147  AVPacket *pkt = ost->pkt;
1148  AVCodecContext *enc = ost->enc_ctx;
1149  AVRational frame_rate;
1150  int nb_frames, nb0_frames, i;
1151  double delta, delta0;
1152  double duration = 0;
1153  double sync_ipts = AV_NOPTS_VALUE;
1154  int frame_size = 0;
1155  InputStream *ist = NULL;
1157 
1158  init_output_stream_wrapper(ost, next_picture, 1);
1159  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1160 
1161  if (ost->source_index >= 0)
1163 
1164  frame_rate = av_buffersink_get_frame_rate(filter);
1165  if (frame_rate.num > 0 && frame_rate.den > 0)
1166  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1167 
1168  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1170 
1171  if (!ost->filters_script &&
1172  !ost->filters &&
1173  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1174  next_picture &&
1175  ist &&
1176  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1177  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1178  }
1179 
1180  if (!next_picture) {
1181  //end, flushing
1182  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1183  ost->last_nb0_frames[1],
1184  ost->last_nb0_frames[2]);
1185  } else {
1186  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1187  delta = delta0 + duration;
1188 
1189  /* by default, we output a single frame */
1190  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1191  nb_frames = 1;
1192 
1193  if (delta0 < 0 &&
1194  delta > 0 &&
1196  ost->vsync_method != VSYNC_DROP) {
1197  if (delta0 < -0.6) {
1198  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1199  } else
1200  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1201  sync_ipts = ost->sync_opts;
1202  duration += delta0;
1203  delta0 = 0;
1204  }
1205 
1206  switch (ost->vsync_method) {
1207  case VSYNC_VSCFR:
1208  if (ost->frame_number == 0 && delta0 >= 0.5) {
1209  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1210  delta = duration;
1211  delta0 = 0;
1212  ost->sync_opts = llrint(sync_ipts);
1213  }
1214  case VSYNC_CFR:
1215  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1216  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1217  nb_frames = 0;
1218  } else if (delta < -1.1)
1219  nb_frames = 0;
1220  else if (delta > 1.1) {
1221  nb_frames = lrintf(delta);
1222  if (delta0 > 1.1)
1223  nb0_frames = llrintf(delta0 - 0.6);
1224  }
1225  break;
1226  case VSYNC_VFR:
1227  if (delta <= -0.6)
1228  nb_frames = 0;
1229  else if (delta > 0.6)
1230  ost->sync_opts = llrint(sync_ipts);
1231  break;
1232  case VSYNC_DROP:
1233  case VSYNC_PASSTHROUGH:
1234  ost->sync_opts = llrint(sync_ipts);
1235  break;
1236  default:
1237  av_assert0(0);
1238  }
1239  }
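 /* Worked example (illustration): under CFR, if the next frame's timestamp
  lands about 2.4 output frame durations ahead of the output clock
  (delta ~= 2.4), nb_frames becomes lrintf(2.4) = 2, i.e. the frame is
  emitted twice to fill the gap; a delta below -1.1 drops it instead. */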
1240 
1241  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1242  nb0_frames = FFMIN(nb0_frames, nb_frames);
1243 
1244  memmove(ost->last_nb0_frames + 1,
1246  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1247  ost->last_nb0_frames[0] = nb0_frames;
1248 
1249  if (nb0_frames == 0 && ost->last_dropped) {
1250  nb_frames_drop++;
1252  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1254  }
1255  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1256  if (nb_frames > dts_error_threshold * 30) {
1257  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1258  nb_frames_drop++;
1259  return;
1260  }
1261  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1262  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1263  if (nb_frames_dup > dup_warning) {
1264  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1265  dup_warning *= 10;
1266  }
1267  }
1268  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1269  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1270 
1271  /* duplicates frame if needed */
1272  for (i = 0; i < nb_frames; i++) {
1273  AVFrame *in_picture;
1274  int forced_keyframe = 0;
1275  double pts_time;
1276 
1277  if (i < nb0_frames && ost->last_frame->buf[0]) {
1278  in_picture = ost->last_frame;
1279  } else
1280  in_picture = next_picture;
1281 
1282  if (!in_picture)
1283  return;
1284 
1285  in_picture->pts = ost->sync_opts;
1286 
1287  if (!check_recording_time(ost))
1288  return;
1289 
1290  in_picture->quality = enc->global_quality;
1291  in_picture->pict_type = 0;
1292 
1294  in_picture->pts != AV_NOPTS_VALUE)
1295  ost->forced_kf_ref_pts = in_picture->pts;
1296 
1297  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1298  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1300  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1301  ost->forced_kf_index++;
1302  forced_keyframe = 1;
1303  } else if (ost->forced_keyframes_pexpr) {
1304  double res;
1308  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1314  res);
1315  if (res) {
1316  forced_keyframe = 1;
1322  }
1323 
1325  } else if ( ost->forced_keyframes
1326  && !strncmp(ost->forced_keyframes, "source", 6)
1327  && in_picture->key_frame==1
1328  && !i) {
1329  forced_keyframe = 1;
1330  } else if ( ost->forced_keyframes
1331  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1332  && !i) {
1333  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1334  ost->dropped_keyframe = 0;
1335  }
1336 
1337  if (forced_keyframe) {
1338  in_picture->pict_type = AV_PICTURE_TYPE_I;
1339  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1340  }
1341 
1343  if (debug_ts) {
1344  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1345  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1346  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1347  enc->time_base.num, enc->time_base.den);
1348  }
1349 
1350  ost->frames_encoded++;
1351 
1352  ret = avcodec_send_frame(enc, in_picture);
1353  if (ret < 0)
1354  goto error;
1355  // Make sure Closed Captions will not be duplicated
1357 
1358  while (1) {
1359  ret = avcodec_receive_packet(enc, pkt);
1360  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1361  if (ret == AVERROR(EAGAIN))
1362  break;
1363  if (ret < 0)
1364  goto error;
1365 
1366  if (debug_ts) {
1367  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1368  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1371  }
1372 
1374  pkt->pts = ost->sync_opts;
1375 
1377 
1378  if (debug_ts) {
1379  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1380  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1383  }
1384 
1385  frame_size = pkt->size;
1386  output_packet(of, pkt, ost, 0);
1387 
1388  /* if two pass, output log */
1389  if (ost->logfile && enc->stats_out) {
1390  fprintf(ost->logfile, "%s", enc->stats_out);
1391  }
1392  }
1393  ost->sync_opts++;
1394  /*
1395  * For video, number of frames in == number of packets out.
1396  * But there may be reordering, so we can't throw away frames on encoder
1397  * flush; we need to limit them here, before they go into the encoder.
1398  */
1399  ost->frame_number++;
1400 
1401  if (vstats_filename && frame_size)
1403  }
1404 
1406  if (next_picture)
1407  av_frame_move_ref(ost->last_frame, next_picture);
1408 
1409  return;
1410 error:
1411  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1412  exit_program(1);
1413 }
1414 
1415 static double psnr(double d)
1416 {
1417  return -10.0 * log10(d);
1418 }
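/* The argument is the summed squared error already normalized by
 * width * height * 255^2, so this is the usual PSNR = 10*log10(MAX^2 / MSE)
 * written as -10*log10(normalized error); e.g. a normalized error of 1e-4
 * corresponds to 40 dB. */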
1419 
1420 static void do_video_stats(OutputStream *ost, int frame_size)
1421 {
1422  AVCodecContext *enc;
1423  int frame_number;
1424  double ti1, bitrate, avg_bitrate;
1425 
1426  /* this is executed just the first time do_video_stats is called */
1427  if (!vstats_file) {
1428  vstats_file = fopen(vstats_filename, "w");
1429  if (!vstats_file) {
1430  perror("fopen");
1431  exit_program(1);
1432  }
1433  }
1434 
1435  enc = ost->enc_ctx;
1436  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1437  frame_number = ost->st->nb_frames;
1438  if (vstats_version <= 1) {
1439  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1440  ost->quality / (float)FF_QP2LAMBDA);
1441  } else {
1442  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1443  ost->quality / (float)FF_QP2LAMBDA);
1444  }
1445 
1446  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1447  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1448 
1449  fprintf(vstats_file,"f_size= %6d ", frame_size);
1450  /* compute pts value */
1452  if (ti1 < 0.01)
1453  ti1 = 0.01;
1454 
1455  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1456  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1457  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1458  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1459  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1460  }
1461 }
1462 
1464 {
1467 
1469 
1470  if (of->shortest) {
1471  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
1472  of->recording_time = FFMIN(of->recording_time, end);
1473  }
1474 }
1475 
1476 /**
1477  * Get and encode new output from any of the filtergraphs, without causing
1478  * activity elsewhere (only frames the buffer sinks already hold are reaped).
1479  *
1480  * @return 0 for success, <0 for severe errors
1481  */
1482 static int reap_filters(int flush)
1483 {
1484  AVFrame *filtered_frame = NULL;
1485  int i;
1486 
1487  /* Reap all buffers present in the buffer sinks */
1488  for (i = 0; i < nb_output_streams; i++) {
1492  AVCodecContext *enc = ost->enc_ctx;
1493  int ret = 0;
1494 
1495  if (!ost->filter || !ost->filter->graph->graph)
1496  continue;
1497  filter = ost->filter->filter;
1498 
1499  /*
1500  * Unlike video, with audio the audio frame size matters.
1501  * Currently we are fully reliant on the lavfi filter chain to
1502  * do the buffering deed for us, and thus the frame size parameter
1503  * needs to be set accordingly. Where does one get the required
1504  * frame size? From the initialized AVCodecContext of an audio
1505  * encoder. Thus, if we have gotten to an audio stream, initialize
1506  * the encoder earlier than receiving the first AVFrame.
1507  */
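 /* What that early initialization enables, as a sketch rather than the code
  elided here: once the audio encoder is open, its fixed frame size can be
  handed to the buffer sink, e.g.
      av_buffersink_set_frame_size(filter, enc->frame_size);
  so the graph delivers frames of exactly the size the encoder expects. */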
1510 
1511  filtered_frame = ost->filtered_frame;
1512 
1513  while (1) {
1514  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1516  if (ret < 0) {
1517  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1519  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1520  } else if (flush && ret == AVERROR_EOF) {
1522  do_video_out(of, ost, NULL);
1523  }
1524  break;
1525  }
1526  if (ost->finished) {
1527  av_frame_unref(filtered_frame);
1528  continue;
1529  }
1530 
1531  switch (av_buffersink_get_type(filter)) {
1532  case AVMEDIA_TYPE_VIDEO:
1533  if (!ost->frame_aspect_ratio.num)
1534  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1535 
1536  do_video_out(of, ost, filtered_frame);
1537  break;
1538  case AVMEDIA_TYPE_AUDIO:
1539  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1540  enc->channels != filtered_frame->channels) {
1542  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1543  break;
1544  }
1545  do_audio_out(of, ost, filtered_frame);
1546  break;
1547  default:
1548  // TODO support subtitle filters
1549  av_assert0(0);
1550  }
1551 
1552  av_frame_unref(filtered_frame);
1553  }
1554  }
1555 
1556  return 0;
1557 }
1558 
1559 static void print_final_stats(int64_t total_size)
1560 {
1561  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1562  uint64_t subtitle_size = 0;
1563  uint64_t data_size = 0;
1564  float percent = -1.0;
1565  int i, j;
1566  int pass1_used = 1;
1567 
1568  for (i = 0; i < nb_output_streams; i++) {
1570  switch (ost->enc_ctx->codec_type) {
1571  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1572  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1573  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1574  default: other_size += ost->data_size; break;
1575  }
1576  extra_size += ost->enc_ctx->extradata_size;
1577  data_size += ost->data_size;
1580  pass1_used = 0;
1581  }
1582 
1583  if (data_size && total_size>0 && total_size >= data_size)
1584  percent = 100.0 * (total_size - data_size) / data_size;
1585 
1586  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1587  video_size / 1024.0,
1588  audio_size / 1024.0,
1589  subtitle_size / 1024.0,
1590  other_size / 1024.0,
1591  extra_size / 1024.0);
1592  if (percent >= 0.0)
1593  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1594  else
1595  av_log(NULL, AV_LOG_INFO, "unknown");
1596  av_log(NULL, AV_LOG_INFO, "\n");
1597 
1598  /* print verbose per-stream stats */
1599  for (i = 0; i < nb_input_files; i++) {
1600  InputFile *f = input_files[i];
1601  uint64_t total_packets = 0, total_size = 0;
1602 
1603  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1604  i, f->ctx->url);
1605 
1606  for (j = 0; j < f->nb_streams; j++) {
1607  InputStream *ist = input_streams[f->ist_index + j];
1608  enum AVMediaType type = ist->dec_ctx->codec_type;
1609 
1610  total_size += ist->data_size;
1611  total_packets += ist->nb_packets;
1612 
1613  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1614  i, j, media_type_string(type));
1615  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1616  ist->nb_packets, ist->data_size);
1617 
1618  if (ist->decoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1620  ist->frames_decoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "\n");
1627  }
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1630  total_packets, total_size);
1631  }
1632 
1633  for (i = 0; i < nb_output_files; i++) {
1634  OutputFile *of = output_files[i];
1635  uint64_t total_packets = 0, total_size = 0;
1636 
1637  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1638  i, of->ctx->url);
1639 
1640  for (j = 0; j < of->ctx->nb_streams; j++) {
1643 
1644  total_size += ost->data_size;
1645  total_packets += ost->packets_written;
1646 
1647  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1648  i, j, media_type_string(type));
1649  if (ost->encoding_needed) {
1650  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1651  ost->frames_encoded);
1652  if (type == AVMEDIA_TYPE_AUDIO)
1653  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1654  av_log(NULL, AV_LOG_VERBOSE, "; ");
1655  }
1656 
1657  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1659 
1660  av_log(NULL, AV_LOG_VERBOSE, "\n");
1661  }
1662 
1663  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1664  total_packets, total_size);
1665  }
1666  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1667  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1668  if (pass1_used) {
1669  av_log(NULL, AV_LOG_WARNING, "\n");
1670  } else {
1671  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1672  }
1673  }
1674 }
1675 
1676 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1677 {
1678  AVBPrint buf, buf_script;
1679  OutputStream *ost;
1680  AVFormatContext *oc;
1681  int64_t total_size;
1682  AVCodecContext *enc;
1683  int frame_number, vid, i;
1684  double bitrate;
1685  double speed;
1686  int64_t pts = INT64_MIN + 1;
1687  static int64_t last_time = -1;
1688  static int first_report = 1;
1689  static int qp_histogram[52];
1690  int hours, mins, secs, us;
1691  const char *hours_sign;
1692  int ret;
1693  float t;
1694 
1695  if (!print_stats && !is_last_report && !progress_avio)
1696  return;
1697 
1698  if (!is_last_report) {
1699  if (last_time == -1) {
1700  last_time = cur_time;
1701  }
1702  if (((cur_time - last_time) < stats_period && !first_report) ||
1703  (first_report && nb_output_dumped < nb_output_files))
1704  return;
1705  last_time = cur_time;
1706  }
1707 
1708  t = (cur_time-timer_start) / 1000000.0;
1709 
1710 
1711  oc = output_files[0]->ctx;
1712 
1713  total_size = avio_size(oc->pb);
1714  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1715  total_size = avio_tell(oc->pb);
1716 
1717  vid = 0;
1719  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1720  for (i = 0; i < nb_output_streams; i++) {
1721  float q = -1;
1722  ost = output_streams[i];
1723  enc = ost->enc_ctx;
1724  if (!ost->stream_copy)
1725  q = ost->quality / (float) FF_QP2LAMBDA;
1726 
1727  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1728  av_bprintf(&buf, "q=%2.1f ", q);
1729  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1730  ost->file_index, ost->index, q);
1731  }
1732  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1733  float fps;
1734 
1735  frame_number = ost->frame_number;
1736  fps = t > 1 ? frame_number / t : 0;
1737  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1738  frame_number, fps < 9.95, fps, q);
1739  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1740  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1741  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1742  ost->file_index, ost->index, q);
1743  if (is_last_report)
1744  av_bprintf(&buf, "L");
1745  if (qp_hist) {
1746  int j;
1747  int qp = lrintf(q);
1748  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1749  qp_histogram[qp]++;
1750  for (j = 0; j < 32; j++)
1751  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1752  }
1753 
1754  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1755  int j;
1756  double error, error_sum = 0;
1757  double scale, scale_sum = 0;
1758  double p;
1759  char type[3] = { 'Y','U','V' };
1760  av_bprintf(&buf, "PSNR=");
1761  for (j = 0; j < 3; j++) {
1762  if (is_last_report) {
1763  error = enc->error[j];
1764  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1765  } else {
1766  error = ost->error[j];
1767  scale = enc->width * enc->height * 255.0 * 255.0;
1768  }
1769  if (j)
1770  scale /= 4;
1771  error_sum += error;
1772  scale_sum += scale;
1773  p = psnr(error / scale);
1774  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1775  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1776  ost->file_index, ost->index, type[j] | 32, p);
1777  }
1778  p = psnr(error_sum / scale_sum);
1779  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1780  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1781  ost->file_index, ost->index, p);
1782  }
1783  vid = 1;
1784  }
1785  /* compute min output value */
1789  if (copy_ts) {
1790  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1794  }
1795  }
1796 
1797  if (is_last_report)
1799  }
1800 
1801  secs = FFABS(pts) / AV_TIME_BASE;
1802  us = FFABS(pts) % AV_TIME_BASE;
1803  mins = secs / 60;
1804  secs %= 60;
1805  hours = mins / 60;
1806  mins %= 60;
1807  hours_sign = (pts < 0) ? "-" : "";
1808 
1809  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1810  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1811 
1812  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1813  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1814  if (pts == AV_NOPTS_VALUE) {
1815  av_bprintf(&buf, "N/A ");
1816  } else {
1817  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1818  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1819  }
1820 
1821  if (bitrate < 0) {
1822  av_bprintf(&buf, "bitrate=N/A");
1823  av_bprintf(&buf_script, "bitrate=N/A\n");
1824  }else{
1825  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1826  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1827  }
1828 
1829  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1830  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1831  if (pts == AV_NOPTS_VALUE) {
1832  av_bprintf(&buf_script, "out_time_us=N/A\n");
1833  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1834  av_bprintf(&buf_script, "out_time=N/A\n");
1835  } else {
1836  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1837  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1838  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1839  hours_sign, hours, mins, secs, us);
1840  }
1841 
1843  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1844  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1845  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1846 
1847  if (speed < 0) {
1848  av_bprintf(&buf, " speed=N/A");
1849  av_bprintf(&buf_script, "speed=N/A\n");
1850  } else {
1851  av_bprintf(&buf, " speed=%4.3gx", speed);
1852  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1853  }
1854 
1855  if (print_stats || is_last_report) {
1856  const char end = is_last_report ? '\n' : '\r';
1857  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1858  fprintf(stderr, "%s %c", buf.str, end);
1859  } else
1860  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1861 
1862  fflush(stderr);
1863  }
1864  av_bprint_finalize(&buf, NULL);
1865 
1866  if (progress_avio) {
1867  av_bprintf(&buf_script, "progress=%s\n",
1868  is_last_report ? "end" : "continue");
1869  avio_write(progress_avio, buf_script.str,
1870  FFMIN(buf_script.len, buf_script.size - 1));
1872  av_bprint_finalize(&buf_script, NULL);
1873  if (is_last_report) {
1874  if ((ret = avio_closep(&progress_avio)) < 0)
1876  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1877  }
1878  }
1879 
1880  first_report = 0;
1881 
1882  if (is_last_report)
1883  print_final_stats(total_size);
1884 }
1885 
1887 {
1888  // We never got any input. Set a fake format, which will
1889  // come from libavformat.
1890  ifilter->format = par->format;
1892  ifilter->channels = par->channels;
1894  ifilter->width = par->width;
1895  ifilter->height = par->height;
1897 }
1898 
1899 static void flush_encoders(void)
1900 {
1901  int i, ret;
1902 
1903  for (i = 0; i < nb_output_streams; i++) {
1905  AVCodecContext *enc = ost->enc_ctx;
1907 
1908  if (!ost->encoding_needed)
1909  continue;
1910 
1911  // Try to enable encoding with no input frames.
1912  // Maybe we should just let encoding fail instead.
1913  if (!ost->initialized) {
1914  FilterGraph *fg = ost->filter->graph;
1915 
1917  "Finishing stream %d:%d without any data written to it.\n",
1918  ost->file_index, ost->st->index);
1919 
1920  if (ost->filter && !fg->graph) {
1921  int x;
1922  for (x = 0; x < fg->nb_inputs; x++) {
1923  InputFilter *ifilter = fg->inputs[x];
1924  if (ifilter->format < 0)
1926  }
1927 
1929  continue;
1930 
1931  ret = configure_filtergraph(fg);
1932  if (ret < 0) {
1933  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1934  exit_program(1);
1935  }
1936 
1938  }
1939 
1941  }
1942 
1944  continue;
1945 
1946  for (;;) {
1947  const char *desc = NULL;
1948  AVPacket *pkt = ost->pkt;
1949  int pkt_size;
1950 
1951  switch (enc->codec_type) {
1952  case AVMEDIA_TYPE_AUDIO:
1953  desc = "audio";
1954  break;
1955  case AVMEDIA_TYPE_VIDEO:
1956  desc = "video";
1957  break;
1958  default:
1959  av_assert0(0);
1960  }
1961 
1963 
1964  while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
1965  ret = avcodec_send_frame(enc, NULL);
1966  if (ret < 0) {
1967  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1968  desc,
1969  av_err2str(ret));
1970  exit_program(1);
1971  }
1972  }
1973 
1974  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1975  if (ret < 0 && ret != AVERROR_EOF) {
1976  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1977  desc,
1978  av_err2str(ret));
1979  exit_program(1);
1980  }
1981  if (ost->logfile && enc->stats_out) {
1982  fprintf(ost->logfile, "%s", enc->stats_out);
1983  }
1984  if (ret == AVERROR_EOF) {
1985  output_packet(of, pkt, ost, 1);
1986  break;
1987  }
1988  if (ost->finished & MUXER_FINISHED) {
1990  continue;
1991  }
1993  pkt_size = pkt->size;
1994  output_packet(of, pkt, ost, 0);
1996  do_video_stats(ost, pkt_size);
1997  }
1998  }
1999  }
2000 }
2001 
2002 /*
2003  * Check whether a packet from ist should be written into ost at this time
2004  */
2006 {
2008  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2009 
2010  if (ost->source_index != ist_index)
2011  return 0;
2012 
2013  if (ost->finished)
2014  return 0;
2015 
2016  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2017  return 0;
2018 
2019  return 1;
2020 }
2021 
2023 {
2025  InputFile *f = input_files [ist->file_index];
2026  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2027  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2028  AVPacket *opkt = ost->pkt;
2029 
2030  av_packet_unref(opkt);
2031  // EOF: flush output bitstream filters.
2032  if (!pkt) {
2033  output_packet(of, opkt, ost, 1);
2034  return;
2035  }
2036 
2037  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2039  return;
2040 
2041  if (!ost->frame_number && !ost->copy_prior_start) {
2042  int64_t comp_start = start_time;
2043  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2044  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2045  if (pkt->pts == AV_NOPTS_VALUE ?
2046  ist->pts < comp_start :
2047  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2048  return;
2049  }
2050 
2051  if (of->recording_time != INT64_MAX &&
2052  ist->pts >= of->recording_time + start_time) {
2054  return;
2055  }
2056 
2057  if (f->recording_time != INT64_MAX) {
2058  start_time = 0;
2059  if (copy_ts) {
2060  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
2061  start_time += start_at_zero ? 0 : f->ctx->start_time;
2062  }
2063  if (ist->pts >= f->recording_time + start_time) {
2065  return;
2066  }
2067  }
2068 
2069  if (av_packet_ref(opkt, pkt) < 0)
2070  exit_program(1);
2071 
2072  if (pkt->pts != AV_NOPTS_VALUE)
2073  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2074 
2075  if (pkt->dts == AV_NOPTS_VALUE) {
2077  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2078  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2079  if(!duration)
2080  duration = ist->dec_ctx->frame_size;
2081  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2082  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2083  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
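 /* av_rescale_delta() keeps the sub-timebase remainder in
  filter_in_rescale_delta_last between packets, so repeated duration-based
  rescaling of audio DTS does not accumulate rounding drift. */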
2084  /* dts will be set immediately afterwards to what pts is now */
2085  opkt->pts = opkt->dts - ost_tb_start_time;
2086  } else
2087  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2088  opkt->dts -= ost_tb_start_time;
2089 
2090  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2091 
2092  ost->sync_opts += opkt->duration;
2093 
2094  output_packet(of, opkt, ost, 0);
2095 }
2096 
2097 static int guess_input_channel_layout(InputStream *ist)
2098 {
2099  AVCodecContext *dec = ist->dec_ctx;
2100 
2101  if (!dec->channel_layout) {
2102  char layout_name[256];
2103 
2104  if (dec->channels > ist->guess_layout_max)
2105  return 0;
2106  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2107  if (!dec->channel_layout)
2108  return 0;
2109  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2110  dec->channels, dec->channel_layout);
2111  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2112  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2113  }
2114  return 1;
2115 }
2116 
2117 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2118 {
2119  if (*got_output || ret<0)
2120  decode_error_stat[ret<0] ++;
2121 
2122  if (ret < 0 && exit_on_error)
2123  exit_program(1);
2124 
2125  if (*got_output && ist) {
2126  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2128  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2129  if (exit_on_error)
2130  exit_program(1);
2131  }
2132  }
2133 }
2134 
2135 // Filters can be configured only if the formats of all inputs are known.
2136 static int ifilter_has_all_input_formats(FilterGraph *fg)
2137 {
2138  int i;
2139  for (i = 0; i < fg->nb_inputs; i++) {
2140  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2141  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2142  return 0;
2143  }
2144  return 1;
2145 }
2146 
2147 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
2148 {
2149  FilterGraph *fg = ifilter->graph;
2150  AVFrameSideData *sd;
2151  int need_reinit, ret;
2152  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
2153 
2154  if (keep_reference)
2155  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
2156 
2157  /* determine if the parameters for this input changed */
2158  need_reinit = ifilter->format != frame->format;
2159 
2160  switch (ifilter->ist->st->codecpar->codec_type) {
2161  case AVMEDIA_TYPE_AUDIO:
2162  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2163  ifilter->channels != frame->channels ||
2164  ifilter->channel_layout != frame->channel_layout;
2165  break;
2166  case AVMEDIA_TYPE_VIDEO:
2167  need_reinit |= ifilter->width != frame->width ||
2168  ifilter->height != frame->height;
2169  break;
2170  }
2171 
2172  if (!ifilter->ist->reinit_filters && fg->graph)
2173  need_reinit = 0;
2174 
2175  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2176  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2177  need_reinit = 1;
2178 
2180  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
2181  need_reinit = 1;
2182  } else if (ifilter->displaymatrix)
2183  need_reinit = 1;
2184 
2185  if (need_reinit) {
2186  ret = ifilter_parameters_from_frame(ifilter, frame);
2187  if (ret < 0)
2188  return ret;
2189  }
2190 
2191  /* (re)init the graph if possible, otherwise buffer the frame and return */
2192  if (need_reinit || !fg->graph) {
2193  if (!ifilter_has_all_input_formats(fg)) {
2194  AVFrame *tmp = av_frame_clone(frame);
2195  if (!tmp)
2196  return AVERROR(ENOMEM);
2197 
2198  if (!av_fifo_space(ifilter->frame_queue)) {
2199  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2200  if (ret < 0) {
2201  av_frame_free(&tmp);
2202  return ret;
2203  }
2204  }
2205  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2206  return 0;
2207  }
2208 
2209  ret = reap_filters(1);
2210  if (ret < 0 && ret != AVERROR_EOF) {
2211  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2212  return ret;
2213  }
2214 
2215  ret = configure_filtergraph(fg);
2216  if (ret < 0) {
2217  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2218  return ret;
2219  }
2220  }
2221 
2222  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2223  if (ret < 0) {
2224  if (ret != AVERROR_EOF)
2225  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2226  return ret;
2227  }
2228 
2229  return 0;
2230 }
2231 
2232 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2233 {
2234  int ret;
2235 
2236  ifilter->eof = 1;
2237 
2238  if (ifilter->filter) {
2239  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2240  if (ret < 0)
2241  return ret;
2242  } else {
2243  // the filtergraph was never configured
2244  if (ifilter->format < 0)
2247  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2248  return AVERROR_INVALIDDATA;
2249  }
2250  }
2251 
2252  return 0;
2253 }
2254 
2255 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2256 // There is the following difference: if you got a frame, you must call
2257 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2258 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2259 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2260 {
2261  int ret;
2262 
2263  *got_frame = 0;
2264 
2265  if (pkt) {
2266  ret = avcodec_send_packet(avctx, pkt);
2267  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2268  // decoded frames with avcodec_receive_frame() until done.
2269  if (ret < 0 && ret != AVERROR_EOF)
2270  return ret;
2271  }
2272 
2273  ret = avcodec_receive_frame(avctx, frame);
2274  if (ret < 0 && ret != AVERROR(EAGAIN))
2275  return ret;
2276  if (ret >= 0)
2277  *got_frame = 1;
2278 
2279  return 0;
2280 }
2281 
2282 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2283 {
2284  int i, ret;
2285 
2286  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2287  for (i = 0; i < ist->nb_filters; i++) {
2288  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2289  if (ret == AVERROR_EOF)
2290  ret = 0; /* ignore */
2291  if (ret < 0) {
2293  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2294  break;
2295  }
2296  }
2297  return ret;
2298 }
2299 
2300 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2301  int *decode_failed)
2302 {
2303  AVFrame *decoded_frame = ist->decoded_frame;
2304  AVCodecContext *avctx = ist->dec_ctx;
2305  int ret, err = 0;
2306  AVRational decoded_frame_tb;
2307 
2309  ret = decode(avctx, decoded_frame, got_output, pkt);
2310  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2311  if (ret < 0)
2312  *decode_failed = 1;
2313 
2314  if (ret >= 0 && avctx->sample_rate <= 0) {
2315  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2316  ret = AVERROR_INVALIDDATA;
2317  }
2318 
2319  if (ret != AVERROR_EOF)
2320  check_decode_result(ist, got_output, ret);
2321 
2322  if (!*got_output || ret < 0)
2323  return ret;
2324 
2325  ist->samples_decoded += decoded_frame->nb_samples;
2326  ist->frames_decoded++;
2327 
2328  /* increment next_dts to use for the case where the input stream does not
2329  have timestamps or there are multiple frames in the packet */
2330  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2331  avctx->sample_rate;
2332  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2333  avctx->sample_rate;
2334 
2335  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2336  decoded_frame_tb = ist->st->time_base;
2337  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2338  decoded_frame->pts = pkt->pts;
2339  decoded_frame_tb = ist->st->time_base;
2340  }else {
2341  decoded_frame->pts = ist->dts;
2342  decoded_frame_tb = AV_TIME_BASE_Q;
2343  }
2344  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2345  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2346  ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
2347  if (pkt)
2348  ist->prev_pkt_pts = pkt->pts;
2350  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2351  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2352  (AVRational){1, avctx->sample_rate});
2353  ist->nb_samples = decoded_frame->nb_samples;
2354  err = send_frame_to_filters(ist, decoded_frame);
2355 
2357  return err < 0 ? err : ret;
2358 }
2359 
2360 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2361  int *decode_failed)
2362 {
2363  AVFrame *decoded_frame = ist->decoded_frame;
2364  int i, ret = 0, err = 0;
2365  int64_t best_effort_timestamp;
2366  int64_t dts = AV_NOPTS_VALUE;
2367 
2368  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2369  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2370  // skip the packet.
2371  if (!eof && pkt && pkt->size == 0)
2372  return 0;
2373 
2374  if (ist->dts != AV_NOPTS_VALUE)
2375  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2376  if (pkt) {
2377  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2378  }
2379 
2380  // The old code used to set dts on the drain packet, which does not work
2381  // with the new API anymore.
2382  if (eof) {
2383  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2384  if (!new)
2385  return AVERROR(ENOMEM);
2386  ist->dts_buffer = new;
2387  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2388  }
2389 
2391  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
2392  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2393  if (ret < 0)
2394  *decode_failed = 1;
2395 
2396  // The following line may be required in some cases where there is no parser
2397  // or the parser does not set has_b_frames correctly
2398  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2399  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2400  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2401  } else
2402  av_log(ist->dec_ctx, AV_LOG_WARNING,
2403  "video_delay is larger in decoder than demuxer %d > %d.\n"
2404  "If you want to help, upload a sample "
2405  "of this file to https://streams.videolan.org/upload/ "
2406  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2407  ist->dec_ctx->has_b_frames,
2408  ist->st->codecpar->video_delay);
2409  }
2410 
2411  if (ret != AVERROR_EOF)
2412  check_decode_result(ist, got_output, ret);
2413 
2414  if (*got_output && ret >= 0) {
2415  if (ist->dec_ctx->width != decoded_frame->width ||
2416  ist->dec_ctx->height != decoded_frame->height ||
2417  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2418  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2422  ist->dec_ctx->width,
2423  ist->dec_ctx->height,
2424  ist->dec_ctx->pix_fmt);
2425  }
2426  }
2427 
2428  if (!*got_output || ret < 0)
2429  return ret;
2430 
2431  if(ist->top_field_first>=0)
2432  decoded_frame->top_field_first = ist->top_field_first;
2433 
2434  ist->frames_decoded++;
2435 
2436  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2437  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2438  if (err < 0)
2439  goto fail;
2440  }
2441  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2442 
2443  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2444  *duration_pts = decoded_frame->pkt_duration;
2445 
2446  if (ist->framerate.num)
2447  best_effort_timestamp = ist->cfr_next_pts++;
2448 
2449  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2450  best_effort_timestamp = ist->dts_buffer[0];
2451 
2452  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2453  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2454  ist->nb_dts_buffer--;
2455  }
2456 
2457  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2458  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2459 
2460  if (ts != AV_NOPTS_VALUE)
2461  ist->next_pts = ist->pts = ts;
2462  }
2463 
2464  if (debug_ts) {
2465  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2466  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2467  ist->st->index, av_ts2str(decoded_frame->pts),
2468  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2469  best_effort_timestamp,
2470  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2472  ist->st->time_base.num, ist->st->time_base.den);
2473  }
2474 
2475  if (ist->st->sample_aspect_ratio.num)
2476  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2477 
2478  err = send_frame_to_filters(ist, decoded_frame);
2479 
2480 fail:
2482  return err < 0 ? err : ret;
2483 }
2484 
2485 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2486  int *decode_failed)
2487 {
2488  AVSubtitle subtitle;
2489  int free_sub = 1;
2490  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2491  &subtitle, got_output, pkt);
2492 
2493  check_decode_result(NULL, got_output, ret);
2494 
2495  if (ret < 0 || !*got_output) {
2496  *decode_failed = 1;
2497  if (!pkt->size)
2498  sub2video_flush(ist);
2499  return ret;
2500  }
2501 
2502  if (ist->fix_sub_duration) {
2503  int end = 1;
2504  if (ist->prev_sub.got_output) {
2505  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2506  1000, AV_TIME_BASE);
2507  if (end < ist->prev_sub.subtitle.end_display_time) {
2508  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2509  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2510  ist->prev_sub.subtitle.end_display_time, end,
2511  end <= 0 ? ", dropping it" : "");
2512  ist->prev_sub.subtitle.end_display_time = end;
2513  }
2514  }
2515  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2516  FFSWAP(int, ret, ist->prev_sub.ret);
2517  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2518  if (end <= 0)
2519  goto out;
2520  }
2521 
2522  if (!*got_output)
2523  return ret;
2524 
2525  if (ist->sub2video.frame) {
2526  sub2video_update(ist, INT64_MIN, &subtitle);
2527  } else if (ist->nb_filters) {
2528  if (!ist->sub2video.sub_queue)
2529  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2530  if (!ist->sub2video.sub_queue)
2531  exit_program(1);
2532  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2533  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2534  if (ret < 0)
2535  exit_program(1);
2536  }
2537  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2538  free_sub = 0;
2539  }
2540 
2541  if (!subtitle.num_rects)
2542  goto out;
2543 
2544  ist->frames_decoded++;
2545 
2546  for (i = 0; i < nb_output_streams; i++) {
2548 
2550  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2551  continue;
2552 
2554  }
2555 
2556 out:
2557  if (free_sub)
2559  return ret;
2560 }
2561 
2562 static int send_filter_eof(InputStream *ist)
2563 {
2564  int i, ret;
2565  /* TODO keep pts also in stream time base to avoid converting back */
2566  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2567  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2568 
2569  for (i = 0; i < ist->nb_filters; i++) {
2570  ret = ifilter_send_eof(ist->filters[i], pts);
2571  if (ret < 0)
2572  return ret;
2573  }
2574  return 0;
2575 }
2576 
2577 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2578 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2579 {
2580  int ret = 0, i;
2581  int repeating = 0;
2582  int eof_reached = 0;
2583 
2584  AVPacket *avpkt = ist->pkt;
2585 
2586  if (!ist->saw_first_ts) {
2587  ist->first_dts =
2588  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2589  ist->pts = 0;
2590  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2591  ist->first_dts =
2592  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2593  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2594  }
2595  ist->saw_first_ts = 1;
2596  }
2597 
2598  if (ist->next_dts == AV_NOPTS_VALUE)
2599  ist->next_dts = ist->dts;
2600  if (ist->next_pts == AV_NOPTS_VALUE)
2601  ist->next_pts = ist->pts;
2602 
2603  if (pkt) {
2604  av_packet_unref(avpkt);
2605  ret = av_packet_ref(avpkt, pkt);
2606  if (ret < 0)
2607  return ret;
2608  }
2609 
2610  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2611  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2612  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2613  ist->next_pts = ist->pts = ist->dts;
2614  }
2615 
2616  // while we have more to decode or while the decoder did output something on EOF
2617  while (ist->decoding_needed) {
2618  int64_t duration_dts = 0;
2619  int64_t duration_pts = 0;
2620  int got_output = 0;
2621  int decode_failed = 0;
2622 
2623  ist->pts = ist->next_pts;
2624  ist->dts = ist->next_dts;
2625 
2626  switch (ist->dec_ctx->codec_type) {
2627  case AVMEDIA_TYPE_AUDIO:
2628  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2629  &decode_failed);
2630  av_packet_unref(avpkt);
2631  break;
2632  case AVMEDIA_TYPE_VIDEO:
2633  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2634  &decode_failed);
2635  if (!repeating || !pkt || got_output) {
2636  if (pkt && pkt->duration) {
2637  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2638  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2639  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
2640  duration_dts = ((int64_t)AV_TIME_BASE *
2641  ist->dec_ctx->framerate.den * ticks) /
2642  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2643  }
2644 
2645  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2646  ist->next_dts += duration_dts;
2647  }else
2648  ist->next_dts = AV_NOPTS_VALUE;
2649  }
2650 
2651  if (got_output) {
2652  if (duration_pts > 0) {
2653  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2654  } else {
2655  ist->next_pts += duration_dts;
2656  }
2657  }
2658  av_packet_unref(avpkt);
2659  break;
2660  case AVMEDIA_TYPE_SUBTITLE:
2661  if (repeating)
2662  break;
2663  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2664  if (!pkt && ret >= 0)
2665  ret = AVERROR_EOF;
2666  av_packet_unref(avpkt);
2667  break;
2668  default:
2669  return -1;
2670  }
2671 
2672  if (ret == AVERROR_EOF) {
2673  eof_reached = 1;
2674  break;
2675  }
2676 
2677  if (ret < 0) {
2678  if (decode_failed) {
2679  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2680  ist->file_index, ist->st->index, av_err2str(ret));
2681  } else {
2682  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2683  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2684  }
2685  if (!decode_failed || exit_on_error)
2686  exit_program(1);
2687  break;
2688  }
2689 
2690  if (got_output)
2691  ist->got_output = 1;
2692 
2693  if (!got_output)
2694  break;
2695 
2696  // During draining, we might get multiple output frames in this loop.
2697  // ffmpeg.c does not drain the filter chain on configuration changes,
2698  // which means if we send multiple frames at once to the filters, and
2699  // one of those frames changes configuration, the buffered frames will
2700  // be lost. This can upset certain FATE tests.
2701  // Decode only 1 frame per call on EOF to appease these FATE tests.
2702  // The ideal solution would be to rewrite decoding to use the new
2703  // decoding API in a better way.
2704  if (!pkt)
2705  break;
2706 
2707  repeating = 1;
2708  }
2709 
2710  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2711  /* except when looping we need to flush but not to send an EOF */
2712  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2713  int ret = send_filter_eof(ist);
2714  if (ret < 0) {
2715  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2716  exit_program(1);
2717  }
2718  }
2719 
2720  /* handle stream copy */
2721  if (!ist->decoding_needed && pkt) {
2722  ist->dts = ist->next_dts;
2723  switch (ist->dec_ctx->codec_type) {
2724  case AVMEDIA_TYPE_AUDIO:
2725  av_assert1(pkt->duration >= 0);
2726  if (ist->dec_ctx->sample_rate) {
2727  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2728  ist->dec_ctx->sample_rate;
2729  } else {
2730  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2731  }
2732  break;
2733  case AVMEDIA_TYPE_VIDEO:
2734  if (ist->framerate.num) {
2735  // TODO: Remove work-around for c99-to-c89 issue 7
2736  AVRational time_base_q = AV_TIME_BASE_Q;
2737  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2738  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2739  } else if (pkt->duration) {
2740  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2741  } else if(ist->dec_ctx->framerate.num != 0) {
2742  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2743  ist->next_dts += ((int64_t)AV_TIME_BASE *
2744  ist->dec_ctx->framerate.den * ticks) /
2745  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2746  }
2747  break;
2748  }
2749  ist->pts = ist->dts;
2750  ist->next_pts = ist->next_dts;
2751  }
2752  for (i = 0; i < nb_output_streams; i++) {
2753  OutputStream *ost = output_streams[i];
2754 
2755  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2756  continue;
2757 
2758  do_streamcopy(ist, ost, pkt);
2759  }
2760 
2761  return !eof_reached;
2762 }
2763 
2764 static int print_sdp(void)
2765 {
2766  char sdp[16384];
2767  int i;
2768  int j, ret;
2769  AVIOContext *sdp_pb;
2770  AVFormatContext **avc;
2771 
2772  for (i = 0; i < nb_output_files; i++) {
2773  if (!output_files[i]->header_written)
2774  return 0;
2775  }
2776 
2777  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2778  if (!avc)
2779  exit_program(1);
2780  for (i = 0, j = 0; i < nb_output_files; i++) {
2781  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2782  avc[j] = output_files[i]->ctx;
2783  j++;
2784  }
2785  }
2786 
2787  if (!j) {
2788  av_log(NULL, AV_LOG_ERROR, "No output streams in the SDP.\n");
2789  ret = AVERROR(EINVAL);
2790  goto fail;
2791  }
2792 
2793  ret = av_sdp_create(avc, j, sdp, sizeof(sdp));
2794  if (ret < 0)
2795  goto fail;
2796 
2797  if (!sdp_filename) {
2798  printf("SDP:\n%s\n", sdp);
2799  fflush(stdout);
2800  } else {
2802  if (ret < 0) {
2803  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2804  goto fail;
2805  }
2806 
2807  avio_print(sdp_pb, sdp);
2808  avio_closep(&sdp_pb);
2810  }
2811 
2812 fail:
2813  av_freep(&avc);
2814  return ret;
2815 }
2816 
2817 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2818 {
2819  InputStream *ist = s->opaque;
2820  const enum AVPixelFormat *p;
2821  int ret;
2822 
2823  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2825  const AVCodecHWConfig *config = NULL;
2826  int i;
2827 
2828  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2829  break;
2830 
2831  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2832  ist->hwaccel_id == HWACCEL_AUTO) {
2833  for (i = 0;; i++) {
2834  config = avcodec_get_hw_config(s->codec, i);
2835  if (!config)
2836  break;
2837  if (!(config->methods &
2839  continue;
2840  if (config->pix_fmt == *p)
2841  break;
2842  }
2843  }
2844  if (config && config->device_type == ist->hwaccel_device_type) {
2846  if (ret < 0) {
2847  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2849  "%s hwaccel requested for input stream #%d:%d, "
2850  "but cannot be initialized.\n",
2851  av_hwdevice_get_type_name(config->device_type),
2852  ist->file_index, ist->st->index);
2853  return AV_PIX_FMT_NONE;
2854  }
2855  continue;
2856  }
2857 
2858  ist->hwaccel_pix_fmt = *p;
2859  break;
2860  }
2861  }
2862 
2863  return *p;
2864 }
2865 
2866 static int init_input_stream(int ist_index, char *error, int error_len)
2867 {
2868  int ret;
2869  InputStream *ist = input_streams[ist_index];
2870 
2871  if (ist->decoding_needed) {
2872  const AVCodec *codec = ist->dec;
2873  if (!codec) {
2874  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2875  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2876  return AVERROR(EINVAL);
2877  }
2878 
2879  ist->dec_ctx->opaque = ist;
2880  ist->dec_ctx->get_format = get_format;
2881 #if LIBAVCODEC_VERSION_MAJOR < 60
2883  ist->dec_ctx->thread_safe_callbacks = 1;
2885 #endif
2886 
2887  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2888  (ist->decoding_needed & DECODING_FOR_OST)) {
2889  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2890  if (ist->decoding_needed & DECODING_FOR_FILTER)
2891  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2892  }
2893 
2894  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2895  * audio, and video decoders such as cuvid or mediacodec */
2896  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2897 
2898  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2899  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2900  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2901  if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2902  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2903 
2904  ret = hw_device_setup_for_decode(ist);
2905  if (ret < 0) {
2906  snprintf(error, error_len, "Device setup failed for "
2907  "decoder on input stream #%d:%d : %s",
2908  ist->file_index, ist->st->index, av_err2str(ret));
2909  return ret;
2910  }
2911 
2912  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2913  if (ret == AVERROR_EXPERIMENTAL)
2914  abort_codec_experimental(codec, 0);
2915 
2916  snprintf(error, error_len,
2917  "Error while opening decoder for input stream "
2918  "#%d:%d : %s",
2919  ist->file_index, ist->st->index, av_err2str(ret));
2920  return ret;
2921  }
2922  assert_avoptions(ist->decoder_opts);
2923  }
2924 
2925  ist->next_pts = AV_NOPTS_VALUE;
2926  ist->next_dts = AV_NOPTS_VALUE;
2927 
2928  return 0;
2929 }
2930 
2931 static InputStream *get_input_stream(OutputStream *ost)
2932 {
2933  if (ost->source_index >= 0)
2934  return input_streams[ost->source_index];
2935  return NULL;
2936 }
2937 
2938 static int compare_int64(const void *a, const void *b)
2939 {
2940  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2941 }
2942 
2943 /* open the muxer when all the streams are initialized */
2944 static int check_init_output_file(OutputFile *of, int file_index)
2945 {
2946  int ret, i;
2947 
2948  for (i = 0; i < of->ctx->nb_streams; i++) {
2950  if (!ost->initialized)
2951  return 0;
2952  }
2953 
2954  of->ctx->interrupt_callback = int_cb;
2955 
2956  ret = avformat_write_header(of->ctx, &of->opts);
2957  if (ret < 0) {
2959  "Could not write header for output file #%d "
2960  "(incorrect codec parameters ?): %s\n",
2962  return ret;
2963  }
2964  //assert_avoptions(of->opts);
2965  of->header_written = 1;
2966 
2967  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2968  nb_output_dumped++;
2969 
2970  if (sdp_filename || want_sdp) {
2971  ret = print_sdp();
2972  if (ret < 0) {
2973  av_log(NULL, AV_LOG_ERROR, "Error writing the SDP.\n");
2974  return ret;
2975  }
2976  }
2977 
2978  /* flush the muxing queues */
2979  for (i = 0; i < of->ctx->nb_streams; i++) {
2981 
2982  /* try to improve muxing time_base (only possible if nothing has been written yet) */
2983  if (!av_fifo_size(ost->muxing_queue))
2985 
2986  while (av_fifo_size(ost->muxing_queue)) {
2987  AVPacket *pkt;
2990  write_packet(of, pkt, ost, 1);
2991  av_packet_free(&pkt);
2992  }
2993  }
2994 
2995  return 0;
2996 }
2997 
2998 static int init_output_bsfs(OutputStream *ost)
2999 {
3000  AVBSFContext *ctx = ost->bsf_ctx;
3001  int ret;
3002 
3003  if (!ctx)
3004  return 0;
3005 
3006  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3007  if (ret < 0)
3008  return ret;
3009 
3010  ctx->time_base_in = ost->st->time_base;
3011 
3012  ret = av_bsf_init(ctx);
3013  if (ret < 0) {
3014  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3015  ctx->filter->name);
3016  return ret;
3017  }
3018 
3019  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3020  if (ret < 0)
3021  return ret;
3022  ost->st->time_base = ctx->time_base_out;
3023 
3024  return 0;
3025 }
3026 
3027 static int init_output_stream_streamcopy(OutputStream *ost)
3028 {
3029  OutputFile *of = output_files[ost->file_index];
3030  InputStream *ist = get_input_stream(ost);
3031  AVCodecParameters *par_dst = ost->st->codecpar;
3032  AVCodecParameters *par_src = ost->ref_par;
3033  AVRational sar;
3034  int i, ret;
3035  uint32_t codec_tag = par_dst->codec_tag;
3036 
3037  av_assert0(ist && !ost->filter);
3038 
3039  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3040  if (ret >= 0)
3042  if (ret < 0) {
3044  "Error setting up codec context options.\n");
3045  return ret;
3046  }
3047 
3049  if (ret < 0) {
3051  "Error getting reference codec parameters.\n");
3052  return ret;
3053  }
3054 
3055  if (!codec_tag) {
3056  unsigned int codec_tag_tmp;
3057  if (!of->ctx->oformat->codec_tag ||
3058  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3059  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3060  codec_tag = par_src->codec_tag;
3061  }
3062 
3063  ret = avcodec_parameters_copy(par_dst, par_src);
3064  if (ret < 0)
3065  return ret;
3066 
3067  par_dst->codec_tag = codec_tag;
3068 
3069  if (!ost->frame_rate.num)
3070  ost->frame_rate = ist->framerate;
3071 
3072  if (ost->frame_rate.num)
3074  else
3075  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3076 
3078  if (ret < 0)
3079  return ret;
3080 
3081  // copy timebase while removing common factors
3082  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3083  if (ost->frame_rate.num)
3085  else
3087  }
3088 
3089  // copy estimated duration as a hint to the muxer
3090  if (ost->st->duration <= 0 && ist->st->duration > 0)
3091  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3092 
3093  if (ist->st->nb_side_data) {
3094  for (i = 0; i < ist->st->nb_side_data; i++) {
3095  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3096  uint8_t *dst_data;
3097 
3098  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3099  if (!dst_data)
3100  return AVERROR(ENOMEM);
3101  memcpy(dst_data, sd_src->data, sd_src->size);
3102  }
3103  }
3104 
3105  if (ost->rotate_overridden) {
3107  sizeof(int32_t) * 9);
3108  if (sd)
3110  }
3111 
3112  switch (par_dst->codec_type) {
3113  case AVMEDIA_TYPE_AUDIO:
3114  if (audio_volume != 256) {
3115  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3116  exit_program(1);
3117  }
3118  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3119  par_dst->block_align= 0;
3120  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3121  par_dst->block_align= 0;
3122  break;
3123  case AVMEDIA_TYPE_VIDEO:
3124  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3125  sar =
3127  (AVRational){ par_dst->height, par_dst->width });
3128  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3129  "with stream copy may produce invalid files\n");
3130  }
3131  else if (ist->st->sample_aspect_ratio.num)
3132  sar = ist->st->sample_aspect_ratio;
3133  else
3134  sar = par_src->sample_aspect_ratio;
3135  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3136  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3137  ost->st->r_frame_rate = ist->st->r_frame_rate;
3138  break;
3139  }
3140 
3141  ost->mux_timebase = ist->st->time_base;
3142 
3143  return 0;
3144 }
3145 
3146 static void set_encoder_id(OutputFile *of, OutputStream *ost)
3147 {
3148  const AVDictionaryEntry *e;
3149 
3150  uint8_t *encoder_string;
3151  int encoder_string_len;
3152  int format_flags = 0;
3153  int codec_flags = ost->enc_ctx->flags;
3154 
3155  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3156  return;
3157 
3158  e = av_dict_get(of->opts, "fflags", NULL, 0);
3159  if (e) {
3160  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3161  if (!o)
3162  return;
3163  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3164  }
3165  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3166  if (e) {
3167  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3168  if (!o)
3169  return;
3171  }
3172 
3173  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3174  encoder_string = av_mallocz(encoder_string_len);
3175  if (!encoder_string)
3176  exit_program(1);
3177 
3178  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3179  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3180  else
3181  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3182  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3183  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3185 }
3186 
3187 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3188  AVCodecContext *avctx)
3189 {
3190  char *p;
3191  int n = 1, i, size, index = 0;
3192  int64_t t, *pts;
3193 
3194  for (p = kf; *p; p++)
3195  if (*p == ',')
3196  n++;
3197  size = n;
3198  pts = av_malloc_array(size, sizeof(*pts));
3199  if (!pts) {
3200  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3201  exit_program(1);
3202  }
3203 
3204  p = kf;
3205  for (i = 0; i < n; i++) {
3206  char *next = strchr(p, ',');
3207 
3208  if (next)
3209  *next++ = 0;
3210 
3211  if (!memcmp(p, "chapters", 8)) {
3212 
3214  int j;
3215 
3216  if (avf->nb_chapters > INT_MAX - size ||
3217  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3218  sizeof(*pts)))) {
3220  "Could not allocate forced key frames array.\n");
3221  exit_program(1);
3222  }
3223  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3224  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3225 
3226  for (j = 0; j < avf->nb_chapters; j++) {
3227  AVChapter *c = avf->chapters[j];
3228  av_assert1(index < size);
3229  pts[index++] = av_rescale_q(c->start, c->time_base,
3230  avctx->time_base) + t;
3231  }
3232 
3233  } else {
3234 
3235  t = parse_time_or_die("force_key_frames", p, 1);
3236  av_assert1(index < size);
3237  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3238 
3239  }
3240 
3241  p = next;
3242  }
3243 
3244  av_assert0(index == size);
3245  qsort(pts, size, sizeof(*pts), compare_int64);
3247  ost->forced_kf_pts = pts;
3248 }
3249 
3250 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3251 {
3253  AVCodecContext *enc_ctx = ost->enc_ctx;
3254  AVFormatContext *oc;
3255 
3256  if (ost->enc_timebase.num > 0) {
3257  enc_ctx->time_base = ost->enc_timebase;
3258  return;
3259  }
3260 
3261  if (ost->enc_timebase.num < 0) {
3262  if (ist) {
3263  enc_ctx->time_base = ist->st->time_base;
3264  return;
3265  }
3266 
3267  oc = output_files[ost->file_index]->ctx;
3268  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3269  }
3270 
3271  enc_ctx->time_base = default_time_base;
3272 }
3273 
3274 static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
3275 {
3276  InputStream *ist = get_input_stream(ost);
3277  AVCodecContext *enc_ctx = ost->enc_ctx;
3278  AVCodecContext *dec_ctx = NULL;
3279  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3280  int ret;
3281 
3283 
3284  if (ist) {
3285  dec_ctx = ist->dec_ctx;
3286  }
3287 
3288  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3289  if (!ost->frame_rate.num)
3291  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
3292  ost->frame_rate = (AVRational){25, 1};
3294  "No information "
3295  "about the input framerate is available. Falling "
3296  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3297  "if you want a different framerate.\n",
3298  ost->file_index, ost->index);
3299  }
3300 
3301  if (ost->max_frame_rate.num &&
3303  !ost->frame_rate.den))
3305 
3306  if (ost->enc->supported_framerates && !ost->force_fps) {
3307  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3308  ost->frame_rate = ost->enc->supported_framerates[idx];
3309  }
3310  // reduce frame rate for mpeg4 to be within the spec limits
3311  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3313  ost->frame_rate.num, ost->frame_rate.den, 65535);
3314  }
3315  }
3316 
3317  switch (enc_ctx->codec_type) {
3318  case AVMEDIA_TYPE_AUDIO:
3323 
3324  if (ost->bits_per_raw_sample)
3326  else if (dec_ctx && ost->filter->graph->is_meta)
3328  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3329 
3331  break;
3332 
3333  case AVMEDIA_TYPE_VIDEO:
3335 
3336  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3338  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3340  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3341  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3342  }
3343 
3344  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3345  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3347  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3348  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3350 
3352 
3353  if (ost->bits_per_raw_sample)
3355  else if (dec_ctx && ost->filter->graph->is_meta)
3357  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3358 
3359  if (frame) {
3360  enc_ctx->color_range = frame->color_range;
3361  enc_ctx->color_primaries = frame->color_primaries;
3362  enc_ctx->color_trc = frame->color_trc;
3363  enc_ctx->colorspace = frame->colorspace;
3364  enc_ctx->chroma_sample_location = frame->chroma_location;
3365  }
3366 
3367  enc_ctx->framerate = ost->frame_rate;
3368 
3370 
3371  // Field order: autodetection
3372  if (frame) {
3374  ost->top_field_first >= 0)
3375  frame->top_field_first = !!ost->top_field_first;
3376 
3377  if (frame->interlaced_frame) {
3378  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3379  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3380  else
3381  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3382  } else
3383  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3384  }
3385 
3386  // Field order: override
3387  if (ost->top_field_first == 0) {
3388  enc_ctx->field_order = AV_FIELD_BB;
3389  } else if (ost->top_field_first == 1) {
3390  enc_ctx->field_order = AV_FIELD_TT;
3391  }
3392 
3393  if (ost->forced_keyframes) {
3394  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3397  if (ret < 0) {
3399  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3400  return ret;
3401  }
3406 
3407  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3408  // parse it only for static kf timings
3409  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3411  }
3412  }
3413  break;
3414  case AVMEDIA_TYPE_SUBTITLE:
3415  enc_ctx->time_base = AV_TIME_BASE_Q;
3416  if (!enc_ctx->width) {
3419  }
3420  break;
3421  case AVMEDIA_TYPE_DATA:
3422  break;
3423  default:
3424  abort();
3425  break;
3426  }
3427 
3428  ost->mux_timebase = enc_ctx->time_base;
3429 
3430  return 0;
3431 }
3432 
3433 static int init_output_stream(OutputStream *ost, AVFrame *frame,
3434  char *error, int error_len)
3435 {
3436  int ret = 0;
3437 
3438  if (ost->encoding_needed) {
3439  const AVCodec *codec = ost->enc;
3440  AVCodecContext *dec = NULL;
3441  InputStream *ist;
3442 
3443  ret = init_output_stream_encode(ost, frame);
3444  if (ret < 0)
3445  return ret;
3446 
3447  if ((ist = get_input_stream(ost)))
3448  dec = ist->dec_ctx;
3449  if (dec && dec->subtitle_header) {
3450  /* ASS code assumes this buffer is null terminated so add extra byte. */
3451  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3452  if (!ost->enc_ctx->subtitle_header)
3453  return AVERROR(ENOMEM);
3454  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3455  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3456  }
3457  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3458  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3459 
3460  ret = hw_device_setup_for_encode(ost);
3461  if (ret < 0) {
3462  snprintf(error, error_len, "Device setup failed for "
3463  "encoder on output stream #%d:%d : %s",
3465  return ret;
3466  }
3467 
3468  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3469  int input_props = 0, output_props = 0;
3470  AVCodecDescriptor const *input_descriptor =
3471  avcodec_descriptor_get(dec->codec_id);
3472  AVCodecDescriptor const *output_descriptor =
3474  if (input_descriptor)
3475  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3476  if (output_descriptor)
3477  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3478  if (input_props && output_props && input_props != output_props) {
3479  snprintf(error, error_len,
3480  "Subtitle encoding currently only possible from text to text "
3481  "or bitmap to bitmap");
3482  return AVERROR_INVALIDDATA;
3483  }
3484  }
3485 
3486  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3487  if (ret == AVERROR_EXPERIMENTAL)
3488  abort_codec_experimental(codec, 1);
3489  snprintf(error, error_len,
3490  "Error while opening encoder for output stream #%d:%d - "
3491  "maybe incorrect parameters such as bit_rate, rate, width or height",
3492  ost->file_index, ost->index);
3493  return ret;
3494  }
3495  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3496  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3498  ost->enc_ctx->frame_size);
3500  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3501  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3502  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3503  " It takes bits/s as argument, not kbits/s\n");
3504 
3506  if (ret < 0) {
3508  "Error initializing the output stream codec context.\n");
3509  exit_program(1);
3510  }
3511 
3512  if (ost->enc_ctx->nb_coded_side_data) {
3513  int i;
3514 
3515  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3516  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3517  uint8_t *dst_data;
3518 
3519  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3520  if (!dst_data)
3521  return AVERROR(ENOMEM);
3522  memcpy(dst_data, sd_src->data, sd_src->size);
3523  }
3524  }
3525 
3526  /*
3527  * Add global input side data. For now this is naive, and copies it
3528  * from the input stream's global side data. All side data should
3529  * really be funneled over AVFrame and libavfilter, then added back to
3530  * packet side data, and then potentially using the first packet for
3531  * global side data.
3532  */
3533  if (ist) {
3534  int i;
3535  for (i = 0; i < ist->st->nb_side_data; i++) {
3536  AVPacketSideData *sd = &ist->st->side_data[i];
3537  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3538  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3539  if (!dst)
3540  return AVERROR(ENOMEM);
3541  memcpy(dst, sd->data, sd->size);
3542  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3543  av_display_rotation_set((uint32_t *)dst, 0);
3544  }
3545  }
3546  }
3547 
3548  // copy timebase while removing common factors
3549  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3551 
3552  // copy estimated duration as a hint to the muxer
3553  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3554  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3555  } else if (ost->stream_copy) {
3556  ret = init_output_stream_streamcopy(ost);
3557  if (ret < 0)
3558  return ret;
3559  }
3560 
3561  /* initialize bitstream filters for the output stream
3562  * needs to be done here, because the codec id for streamcopy is not
3563  * known until now */
3564  ret = init_output_bsfs(ost);
3565  if (ret < 0)
3566  return ret;
3567 
3568  ost->initialized = 1;
3569 
3570  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3571  if (ret < 0)
3572  return ret;
3573 
3574  return ret;
3575 }
3576 
3577 static void report_new_stream(int input_index, AVPacket *pkt)
3578 {
3579  InputFile *file = input_files[input_index];
3580  AVStream *st = file->ctx->streams[pkt->stream_index];
3581 
3582  if (pkt->stream_index < file->nb_streams_warn)
3583  return;
3584  av_log(file->ctx, AV_LOG_WARNING,
3585  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3587  input_index, pkt->stream_index,
3589  file->nb_streams_warn = pkt->stream_index + 1;
3590 }
3591 
3592 static int transcode_init(void)
3593 {
3594  int ret = 0, i, j, k;
3595  AVFormatContext *oc;
3596  OutputStream *ost;
3597  InputStream *ist;
3598  char error[1024] = {0};
3599 
3600  for (i = 0; i < nb_filtergraphs; i++) {
3601  FilterGraph *fg = filtergraphs[i];
3602  for (j = 0; j < fg->nb_outputs; j++) {
3603  OutputFilter *ofilter = fg->outputs[j];
3604  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3605  continue;
3606  if (fg->nb_inputs != 1)
3607  continue;
3608  for (k = nb_input_streams-1; k >= 0 ; k--)
3609  if (fg->inputs[0]->ist == input_streams[k])
3610  break;
3611  ofilter->ost->source_index = k;
3612  }
3613  }
3614 
3615  /* init framerate emulation */
3616  for (i = 0; i < nb_input_files; i++) {
3618  if (ifile->readrate || ifile->rate_emu)
3619  for (j = 0; j < ifile->nb_streams; j++)
3620  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3621  }
3622 
3623  /* init input streams */
3624  for (i = 0; i < nb_input_streams; i++)
3625  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3626  for (i = 0; i < nb_output_streams; i++) {
3627  ost = output_streams[i];
3629  }
3630  goto dump_format;
3631  }
3632 
3633  /*
3634  * initialize stream copy and subtitle/data streams.
3635  * Encoded AVFrame based streams will get initialized as follows:
3636  * - when the first AVFrame is received in do_video_out
3637  * - just before the first AVFrame is received in either transcode_step
3638  * or reap_filters due to us requiring the filter chain buffer sink
3639  * to be configured with the correct audio frame size, which is only
3640  * known after the encoder is initialized.
3641  */
3642  for (i = 0; i < nb_output_streams; i++) {
3643  if (!output_streams[i]->stream_copy &&
3644  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3646  continue;
3647 
3649  if (ret < 0)
3650  goto dump_format;
3651  }
3652 
3653  /* discard unused programs */
3654  for (i = 0; i < nb_input_files; i++) {
3656  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3657  AVProgram *p = ifile->ctx->programs[j];
3658  int discard = AVDISCARD_ALL;
3659 
3660  for (k = 0; k < p->nb_stream_indexes; k++)
3661  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3663  break;
3664  }
3665  p->discard = discard;
3666  }
3667  }
3668 
3669  /* write headers for files with no streams */
3670  for (i = 0; i < nb_output_files; i++) {
3671  oc = output_files[i]->ctx;
3672  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3674  if (ret < 0)
3675  goto dump_format;
3676  }
3677  }
3678 
3679  dump_format:
3680  /* dump the stream mapping */
3681  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3682  for (i = 0; i < nb_input_streams; i++) {
3683  ist = input_streams[i];
3684 
3685  for (j = 0; j < ist->nb_filters; j++) {
3686  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3687  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3688  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3689  ist->filters[j]->name);
3690  if (nb_filtergraphs > 1)
3691  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3692  av_log(NULL, AV_LOG_INFO, "\n");
3693  }
3694  }
3695  }
3696 
3697  for (i = 0; i < nb_output_streams; i++) {
3698  ost = output_streams[i];
3699 
3700  if (ost->attachment_filename) {
3701  /* an attached file */
3702  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3704  continue;
3705  }
3706 
3708  /* output from a complex graph */
3709  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3710  if (nb_filtergraphs > 1)
3711  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3712 
3713  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3714  ost->index, ost->enc ? ost->enc->name : "?");
3715  continue;
3716  }
3717 
3718  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3721  ost->file_index,
3722  ost->index);
3724  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3726  ost->sync_ist->st->index);
3727  if (ost->stream_copy)
3728  av_log(NULL, AV_LOG_INFO, " (copy)");
3729  else {
3730  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3731  const AVCodec *out_codec = ost->enc;
3732  const char *decoder_name = "?";
3733  const char *in_codec_name = "?";
3734  const char *encoder_name = "?";
3735  const char *out_codec_name = "?";
3736  const AVCodecDescriptor *desc;
3737 
3738  if (in_codec) {
3739  decoder_name = in_codec->name;
3740  desc = avcodec_descriptor_get(in_codec->id);
3741  if (desc)
3742  in_codec_name = desc->name;
3743  if (!strcmp(decoder_name, in_codec_name))
3744  decoder_name = "native";
3745  }
3746 
3747  if (out_codec) {
3748  encoder_name = out_codec->name;
3749  desc = avcodec_descriptor_get(out_codec->id);
3750  if (desc)
3751  out_codec_name = desc->name;
3752  if (!strcmp(encoder_name, out_codec_name))
3753  encoder_name = "native";
3754  }
3755 
3756  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3757  in_codec_name, decoder_name,
3758  out_codec_name, encoder_name);
3759  }
3760  av_log(NULL, AV_LOG_INFO, "\n");
3761  }
3762 
3763  if (ret) {
3764  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3765  return ret;
3766  }
3767 
3769 
3770  return 0;
3771 }
3772 
3773 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3774 static int need_output(void)
3775 {
3776  int i;
3777 
3778  for (i = 0; i < nb_output_streams; i++) {
3782 
3783  if (ost->finished ||
3784  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3785  continue;
3786  if (ost->frame_number >= ost->max_frames) {
3787  int j;
3788  for (j = 0; j < of->ctx->nb_streams; j++)
3790  continue;
3791  }
3792 
3793  return 1;
3794  }
3795 
3796  return 0;
3797 }
3798 
3799 /**
3800  * Select the output stream to process.
3801  *
3802  * @return selected output stream, or NULL if none available
3803  */
3804 static OutputStream *choose_output(void)
3805 {
3806  int i;
3807  int64_t opts_min = INT64_MAX;
3808  OutputStream *ost_min = NULL;
3809 
3810  for (i = 0; i < nb_output_streams; i++) {
3812  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3814  AV_TIME_BASE_Q);
3817  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3819 
3820  if (!ost->initialized && !ost->inputs_done)
3821  return ost->unavailable ? NULL : ost;
3822 
3823  if (!ost->finished && opts < opts_min) {
3824  opts_min = opts;
3825  ost_min = ost->unavailable ? NULL : ost;
3826  }
3827  }
3828  return ost_min;
3829 }
3830 
3831 static void set_tty_echo(int on)
3832 {
3833 #if HAVE_TERMIOS_H
3834  struct termios tty;
3835  if (tcgetattr(0, &tty) == 0) {
3836  if (on) tty.c_lflag |= ECHO;
3837  else tty.c_lflag &= ~ECHO;
3838  tcsetattr(0, TCSANOW, &tty);
3839  }
3840 #endif
3841 }
3842 
3843 static int check_keyboard_interaction(int64_t cur_time)
3844 {
3845  int i, ret, key;
3846  static int64_t last_time;
3847  if (received_nb_signals)
3848  return AVERROR_EXIT;
3849  /* read_key() returns 0 on EOF */
3850  if(cur_time - last_time >= 100000 && !run_as_daemon){
3851  key = read_key();
3852  last_time = cur_time;
3853  }else
3854  key = -1;
3855  if (key == 'q') {
3856  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
3857  return AVERROR_EXIT;
3858  }
3859  if (key == '+') av_log_set_level(av_log_get_level()+10);
3860  if (key == '-') av_log_set_level(av_log_get_level()-10);
3861  if (key == 's') qp_hist ^= 1;
3862  if (key == 'h'){
3863  if (do_hex_dump){
3864  do_hex_dump = do_pkt_dump = 0;
3865  } else if(do_pkt_dump){
3866  do_hex_dump = 1;
3867  } else
3868  do_pkt_dump = 1;
3870  }
3871  if (key == 'c' || key == 'C'){
3872  char buf[4096], target[64], command[256], arg[256] = {0};
3873  double time;
3874  int k, n = 0;
3875  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3876  i = 0;
3877  set_tty_echo(1);
3878  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3879  if (k > 0)
3880  buf[i++] = k;
3881  buf[i] = 0;
3882  set_tty_echo(0);
3883  fprintf(stderr, "\n");
3884  if (k > 0 &&
3885  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3886  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3887  target, time, command, arg);
3888  for (i = 0; i < nb_filtergraphs; i++) {
3889  FilterGraph *fg = filtergraphs[i];
3890  if (fg->graph) {
3891  if (time < 0) {
3892  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3893  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3894  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3895  } else if (key == 'c') {
3896  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3898  } else {
3899  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3900  if (ret < 0)
3901  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3902  }
3903  }
3904  }
3905  } else {
3907  "Parse error, at least 3 arguments were expected, "
3908  "only %d given in string '%s'\n", n, buf);
3909  }
3910  }
3911  if (key == 'd' || key == 'D'){
3912  int debug=0;
3913  if(key == 'D') {
3914  debug = input_streams[0]->dec_ctx->debug << 1;
3915  if(!debug) debug = 1;
3916  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
3917  debug += debug;
3918  }else{
3919  char buf[32];
3920  int k = 0;
3921  i = 0;
3922  set_tty_echo(1);
3923  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3924  if (k > 0)
3925  buf[i++] = k;
3926  buf[i] = 0;
3927  set_tty_echo(0);
3928  fprintf(stderr, "\n");
3929  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3930  fprintf(stderr,"error parsing debug value\n");
3931  }
3932  for(i=0;i<nb_input_streams;i++) {
3933  input_streams[i]->dec_ctx->debug = debug;
3934  }
3935  for(i=0;i<nb_output_streams;i++) {
3937  ost->enc_ctx->debug = debug;
3938  }
3939  if(debug) av_log_set_level(AV_LOG_DEBUG);
3940  fprintf(stderr,"debug=%d\n", debug);
3941  }
3942  if (key == '?'){
3943  fprintf(stderr, "key function\n"
3944  "? show this help\n"
3945  "+ increase verbosity\n"
3946  "- decrease verbosity\n"
3947  "c Send command to first matching filter supporting it\n"
3948  "C Send/Queue command to all matching filters\n"
3949  "D cycle through available debug modes\n"
3950  "h dump packets/hex press to cycle through the 3 states\n"
3951  "q quit\n"
3952  "s Show QP histogram\n"
3953  );
3954  }
3955  return 0;
3956 }
3957 
3958 #if HAVE_THREADS
3959 static void *input_thread(void *arg)
3960 {
3961  InputFile *f = arg;
3962  AVPacket *pkt = f->pkt, *queue_pkt;
3963  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3964  int ret = 0;
3965 
3966  while (1) {
3967  ret = av_read_frame(f->ctx, pkt);
3968 
3969  if (ret == AVERROR(EAGAIN)) {
3970  av_usleep(10000);
3971  continue;
3972  }
3973  if (ret < 0) {
3974  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3975  break;
3976  }
3977  queue_pkt = av_packet_alloc();
3978  if (!queue_pkt) {
3980  av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
3981  break;
3982  }
3983  av_packet_move_ref(queue_pkt, pkt);
3984  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3985  if (flags && ret == AVERROR(EAGAIN)) {
3986  flags = 0;
3987  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3988  av_log(f->ctx, AV_LOG_WARNING,
3989  "Thread message queue blocking; consider raising the "
3990  "thread_queue_size option (current value: %d)\n",
3991  f->thread_queue_size);
3992  }
3993  if (ret < 0) {
3994  if (ret != AVERROR_EOF)
3995  av_log(f->ctx, AV_LOG_ERROR,
3996  "Unable to send packet to main thread: %s\n",
3997  av_err2str(ret));
3998  av_packet_free(&queue_pkt);
3999  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4000  break;
4001  }
4002  }
4003 
4004  return NULL;
4005 }
4006 
4007 static void free_input_thread(int i)
4008 {
4009  InputFile *f = input_files[i];
4010  AVPacket *pkt;
4011 
4012  if (!f || !f->in_thread_queue)
4013  return;
4015  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4016  av_packet_free(&pkt);
4017 
4018  pthread_join(f->thread, NULL);
4019  f->joined = 1;
4020  av_thread_message_queue_free(&f->in_thread_queue);
4021 }
4022 
4023 static void free_input_threads(void)
4024 {
4025  int i;
4026 
4027  for (i = 0; i < nb_input_files; i++)
4028  free_input_thread(i);
4029 }
4030 
4031 static int init_input_thread(int i)
4032 {
4033  int ret;
4034  InputFile *f = input_files[i];
4035 
4036  if (f->thread_queue_size < 0)
4037  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4038  if (!f->thread_queue_size)
4039  return 0;
4040 
4041  if (f->ctx->pb ? !f->ctx->pb->seekable :
4042  strcmp(f->ctx->iformat->name, "lavfi"))
4043  f->non_blocking = 1;
4044  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4045  f->thread_queue_size, sizeof(f->pkt));
4046  if (ret < 0)
4047  return ret;
4048 
4049  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4050  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4051  av_thread_message_queue_free(&f->in_thread_queue);
4052  return AVERROR(ret);
4053  }
4054 
4055  return 0;
4056 }
4057 
4058 static int init_input_threads(void)
4059 {
4060  int i, ret;
4061 
4062  for (i = 0; i < nb_input_files; i++) {
4063  ret = init_input_thread(i);
4064  if (ret < 0)
4065  return ret;
4066  }
4067  return 0;
4068 }
4069 
4070 static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4071 {
4072  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4073  f->non_blocking ?
4074  AV_THREAD_MESSAGE_NONBLOCK : 0);
4075 }
4076 #endif
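
The input_thread()/init_input_thread()/get_input_packet_mt() trio above is a producer/consumer pair built on libavutil's thread message queue. The following is a minimal, self-contained sketch of the same pattern (illustrative only, not part of ffmpeg.c; the DemuxCtx struct, queue size and blocking sends are assumptions for the example):

    #include <pthread.h>
    #include "libavformat/avformat.h"
    #include "libavutil/threadmessage.h"

    typedef struct DemuxCtx { AVFormatContext *ic; AVThreadMessageQueue *q; } DemuxCtx;
    /* queue created elsewhere with av_thread_message_queue_alloc(&q, 8, sizeof(AVPacket *)) */

    static void *demux_thread(void *arg)                 /* producer, cf. input_thread() */
    {
        DemuxCtx *d = arg;
        for (;;) {
            AVPacket *pkt = av_packet_alloc();
            int ret = pkt ? av_read_frame(d->ic, pkt) : AVERROR(ENOMEM);
            if (ret < 0) {                               /* EOF, error or OOM: wake the receiver */
                av_packet_free(&pkt);
                av_thread_message_queue_set_err_recv(d->q, ret);
                return NULL;
            }
            if (av_thread_message_queue_send(d->q, &pkt, 0) < 0) {  /* blocking send */
                av_packet_free(&pkt);                    /* queue was shut down */
                return NULL;
            }
        }
    }

    static int drain_packets(DemuxCtx *d)                /* consumer, cf. get_input_packet_mt() */
    {
        AVPacket *pkt;
        int ret;
        while ((ret = av_thread_message_queue_recv(d->q, &pkt, 0)) >= 0) {
            /* ... consume pkt ... */
            av_packet_free(&pkt);
        }
        return ret;                                      /* AVERROR_EOF on a clean end */
    }
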
4077 
4078 static int get_input_packet(InputFile *f, AVPacket **pkt)
4079 {
4080  if (f->readrate || f->rate_emu) {
4081  int i;
4082  int64_t file_start = copy_ts * (
4083  (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
4084  (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
4085  );
4086  float scale = f->rate_emu ? 1.0 : f->readrate;
4087  for (i = 0; i < f->nb_streams; i++) {
4088  InputStream *ist = input_streams[f->ist_index + i];
4089  int64_t stream_ts_offset, pts, now;
4090  if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
4091  stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
4092  pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4093  now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
4094  if (pts > now)
4095  return AVERROR(EAGAIN);
4096  }
4097  }
4098 
4099 #if HAVE_THREADS
4100  if (f->thread_queue_size)
4101  return get_input_packet_mt(f, pkt);
4102 #endif
4103  *pkt = f->pkt;
4104  return av_read_frame(f->ctx, *pkt);
4105 }
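
To make the -readrate/-re throttle above concrete, a worked example with assumed numbers (stream_ts_offset taken as 0 for simplicity): at -readrate 2.0 the demuxer may run at twice real time, so a packet whose rescaled dts lies 1.5 s into the stream becomes available after roughly 0.75 s of wall clock.

    /* illustrative values only */
    float   scale      = 2.0f;            /* f->readrate                        */
    int64_t elapsed_us = 800000;          /* av_gettime_relative() - ist->start */
    int64_t pts_us     = 1500000;         /* ist->dts in microseconds           */
    int64_t now        = elapsed_us * scale;   /* 1 600 000 us of media allowed */
    /* pts_us <= now, so get_input_packet() reads the packet instead of
     * returning AVERROR(EAGAIN); a packet further in the future would wait */
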
4106 
4107 static int got_eagain(void)
4108 {
4109  int i;
4110  for (i = 0; i < nb_output_streams; i++)
4111  if (output_streams[i]->unavailable)
4112  return 1;
4113  return 0;
4114 }
4115 
4116 static void reset_eagain(void)
4117 {
4118  int i;
4119  for (i = 0; i < nb_input_files; i++)
4120  input_files[i]->eagain = 0;
4121  for (i = 0; i < nb_output_streams; i++)
4122  output_streams[i]->unavailable = 0;
4123 }
4124 
4125 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4126 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4127  AVRational time_base)
4128 {
4129  int ret;
4130 
4131  if (!*duration) {
4132  *duration = tmp;
4133  return tmp_time_base;
4134  }
4135 
4136  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4137  if (ret < 0) {
4138  *duration = tmp;
4139  return tmp_time_base;
4140  }
4141 
4142  return time_base;
4143 }
4144 
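
For illustration (values invented for the example), duration_max() keeps the longer candidate without converting either value, because av_compare_ts() compares timestamps across time bases exactly:

    int64_t    dur = 1000;                                  /* 1000 ticks @ 1/48000 ~ 20.8 ms */
    AVRational tb  = (AVRational){ 1, 48000 };
    tb = duration_max(90000, &dur, (AVRational){ 1, 90000 }, tb);   /* candidate: 1 s */
    /* dur is now 90000 and tb is {1, 90000}: the longer duration won,
     * still expressed in its own time base */
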
4145 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4146 {
4147  InputStream *ist;
4148  AVCodecContext *avctx;
4149  int i, ret, has_audio = 0;
4150  int64_t duration = 0;
4151 
4152  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4153  if (ret < 0)
4154  return ret;
4155 
4156  for (i = 0; i < ifile->nb_streams; i++) {
4157  ist = input_streams[ifile->ist_index + i];
4158  avctx = ist->dec_ctx;
4159 
4160  /* duration is the length of the last frame in a stream
4161  * when audio stream is present we don't care about
4162  * last video frame length because it's not defined exactly */
4163  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4164  has_audio = 1;
4165  }
4166 
4167  for (i = 0; i < ifile->nb_streams; i++) {
4168  ist = input_streams[ifile->ist_index + i];
4169  avctx = ist->dec_ctx;
4170 
4171  if (has_audio) {
4172  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4173  AVRational sample_rate = {1, avctx->sample_rate};
4174 
4175  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4176  } else {
4177  continue;
4178  }
4179  } else {
4180  if (ist->framerate.num) {
4181  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4182  } else if (ist->st->avg_frame_rate.num) {
4183  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4184  } else {
4185  duration = 1;
4186  }
4187  }
4188  if (!ifile->duration)
4189  ifile->time_base = ist->st->time_base;
4190  /* the total duration of the stream, max_pts - min_pts is
4191  * the duration of the stream without the last frame */
4192  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4193  duration += ist->max_pts - ist->min_pts;
4194  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4195  ifile->time_base);
4196  }
4197 
4198  if (ifile->loop > 0)
4199  ifile->loop--;
4200 
4201  return ret;
4202 }
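
The per-stream "last frame" duration computed above is a plain rescale into the stream time base; two worked examples with assumed stream parameters:

    /* audio: last frame carried 1024 samples, 48 kHz stream, time_base 1/48000 */
    int64_t a = av_rescale_q(1024, (AVRational){ 1, 48000 }, (AVRational){ 1, 48000 });   /* 1024 */

    /* video: 25 fps stream with time_base 1/90000 -> one frame lasts 3600 ticks */
    int64_t v = av_rescale_q(1, av_inv_q((AVRational){ 25, 1 }), (AVRational){ 1, 90000 }); /* 3600 */
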
4203 
4204 /*
4205  * Return
4206  * - 0 -- one packet was read and processed
4207  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4208  * this function should be called again
4209  * - AVERROR_EOF -- this function should not be called again
4210  */
4211 static int process_input(int file_index)
4212 {
4213  InputFile *ifile = input_files[file_index];
4214  AVFormatContext *is;
4215  InputStream *ist;
4216  AVPacket *pkt;
4217  int ret, thread_ret, i, j;
4218  int64_t duration;
4219  int64_t pkt_dts;
4220  int disable_discontinuity_correction = copy_ts;
4221 
4222  is = ifile->ctx;
4223  ret = get_input_packet(ifile, &pkt);
4224 
4225  if (ret == AVERROR(EAGAIN)) {
4226  ifile->eagain = 1;
4227  return ret;
4228  }
4229  if (ret < 0 && ifile->loop) {
4230  AVCodecContext *avctx;
4231  for (i = 0; i < ifile->nb_streams; i++) {
4232  ist = input_streams[ifile->ist_index + i];
4233  avctx = ist->dec_ctx;
4234  if (ist->decoding_needed) {
4235  ret = process_input_packet(ist, NULL, 1);
4236  if (ret>0)
4237  return 0;
4238  avcodec_flush_buffers(avctx);
4239  }
4240  }
4241 #if HAVE_THREADS
4242  free_input_thread(file_index);
4243 #endif
4244  ret = seek_to_start(ifile, is);
4245 #if HAVE_THREADS
4246  thread_ret = init_input_thread(file_index);
4247  if (thread_ret < 0)
4248  return thread_ret;
4249 #endif
4250  if (ret < 0)
4251  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4252  else
4253  ret = get_input_packet(ifile, &pkt);
4254  if (ret == AVERROR(EAGAIN)) {
4255  ifile->eagain = 1;
4256  return ret;
4257  }
4258  }
4259  if (ret < 0) {
4260  if (ret != AVERROR_EOF) {
4261  print_error(is->url, ret);
4262  if (exit_on_error)
4263  exit_program(1);
4264  }
4265 
4266  for (i = 0; i < ifile->nb_streams; i++) {
4267  ist = input_streams[ifile->ist_index + i];
4268  if (ist->decoding_needed) {
4269  ret = process_input_packet(ist, NULL, 0);
4270  if (ret>0)
4271  return 0;
4272  }
4273 
4274  /* mark all outputs that don't go through lavfi as finished */
4275  for (j = 0; j < nb_output_streams; j++) {
4276  OutputStream *ost = output_streams[j];
4277 
4278  if (ost->source_index == ifile->ist_index + i &&
4279  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4280  finish_output_stream(ost);
4281  }
4282  }
4283 
4284  ifile->eof_reached = 1;
4285  return AVERROR(EAGAIN);
4286  }
4287 
4288  reset_eagain();
4289 
4290  if (do_pkt_dump) {
4291  av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
4292  is->streams[pkt->stream_index]);
4293  }
4294  /* the following test is needed in case new streams appear
4295  dynamically in stream : we ignore them */
4296  if (pkt->stream_index >= ifile->nb_streams) {
4297  report_new_stream(file_index, pkt);
4298  goto discard_packet;
4299  }
4300 
4301  ist = input_streams[ifile->ist_index + pkt->stream_index];
4302 
4303  ist->data_size += pkt->size;
4304  ist->nb_packets++;
4305 
4306  if (ist->discard)
4307  goto discard_packet;
4308 
4309  if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
4310  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4311  "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
4312  if (exit_on_error)
4313  exit_program(1);
4314  }
4315 
4316  if (debug_ts) {
4317  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4318  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4319  ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4320  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4321  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4322  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4323  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4324  av_ts2str(input_files[ist->file_index]->ts_offset),
4325  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4326  }
4327 
4328  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4329  int64_t stime, stime2;
4330  // Correcting starttime based on the enabled streams
4331  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4332  // so we instead do it here as part of discontinuity handling
4333  if ( ist->next_dts == AV_NOPTS_VALUE
4334  && ifile->ts_offset == -is->start_time
4335  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4336  int64_t new_start_time = INT64_MAX;
4337  for (i=0; i<is->nb_streams; i++) {
4338  AVStream *st = is->streams[i];
4339  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4340  continue;
4341  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4342  }
4343  if (new_start_time > is->start_time) {
4344  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4345  ifile->ts_offset = -new_start_time;
4346  }
4347  }
4348 
4349  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4350  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4351  ist->wrap_correction_done = 1;
4352 
4353  if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4354  pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
4355  ist->wrap_correction_done = 0;
4356  }
4357  if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4358  pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
4359  ist->wrap_correction_done = 0;
4360  }
4361  }
4362 
4363  /* add the stream-global side data to the first packet */
4364  if (ist->nb_packets == 1) {
4365  for (i = 0; i < ist->st->nb_side_data; i++) {
4366  AVPacketSideData *src_sd = &ist->st->side_data[i];
4367  uint8_t *dst_data;
4368 
4369  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4370  continue;
4371 
4372  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
4373  continue;
4374 
4375  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
4376  if (!dst_data)
4377  exit_program(1);
4378 
4379  memcpy(dst_data, src_sd->data, src_sd->size);
4380  }
4381  }
4382 
4383  if (pkt->dts != AV_NOPTS_VALUE)
4384  pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4385  if (pkt->pts != AV_NOPTS_VALUE)
4386  pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4387 
4388  if (pkt->pts != AV_NOPTS_VALUE)
4389  pkt->pts *= ist->ts_scale;
4390  if (pkt->dts != AV_NOPTS_VALUE)
4391  pkt->dts *= ist->ts_scale;
4392 
4393  pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4394  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4395  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4396  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4397  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4398  int64_t delta = pkt_dts - ifile->last_ts;
4399  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4400  delta > 1LL*dts_delta_threshold*AV_TIME_BASE) {
4401  ifile->ts_offset -= delta;
4402  av_log(NULL, AV_LOG_DEBUG,
4403  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4404  delta, ifile->ts_offset);
4405  pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4406  if (pkt->pts != AV_NOPTS_VALUE)
4407  pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4408  }
4409  }
4410 
4411  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4412  if (pkt->pts != AV_NOPTS_VALUE) {
4413  pkt->pts += duration;
4414  ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
4415  ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
4416  }
4417 
4418  if (pkt->dts != AV_NOPTS_VALUE)
4419  pkt->dts += duration;
4420 
4421  pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4422 
4423  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4424  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4425  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
4426  ist->st->time_base, AV_TIME_BASE_Q,
4427  AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4428  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4429  disable_discontinuity_correction = 0;
4430  }
4431 
4432  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4433  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4434  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4435  !disable_discontinuity_correction) {
4436  int64_t delta = pkt_dts - ist->next_dts;
4437  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4438  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4439  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4440  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4441  ifile->ts_offset -= delta;
4442  av_log(NULL, AV_LOG_WARNING,
4443  "timestamp discontinuity for stream #%d:%d "
4444  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4445  ist->file_index, ist->st->index, ist->st->id,
4446  av_get_media_type_string(ist->dec_ctx->codec_type),
4447  delta, ifile->ts_offset);
4448  pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4449  if (pkt->pts != AV_NOPTS_VALUE)
4450  pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4451  }
4452  } else {
4453  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4454  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4455  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
4456  pkt->dts = AV_NOPTS_VALUE;
4457  }
4458  if (pkt->pts != AV_NOPTS_VALUE){
4459  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
4460  delta = pkt_pts - ist->next_dts;
4461  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4462  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4463  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
4464  pkt->pts = AV_NOPTS_VALUE;
4465  }
4466  }
4467  }
4468  }
4469 
4470  if (pkt->dts != AV_NOPTS_VALUE)
4471  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
4472 
4473  if (debug_ts) {
4474  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4475  ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4476  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4477  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4478  av_ts2str(input_files[ist->file_index]->ts_offset),
4479  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4480  }
4481 
4482  sub2video_heartbeat(ist, pkt->pts);
4483 
4484  process_input_packet(ist, pkt, 0);
4485 
4486 discard_packet:
4487 #if HAVE_THREADS
4488  if (ifile->thread_queue_size)
4489  av_packet_free(&pkt);
4490  else
4491 #endif
4492  av_packet_unref(pkt);
4493 
4494  return 0;
4495 }
4496 
4497 /**
4498  * Perform a step of transcoding for the specified filter graph.
4499  *
4500  * @param[in] graph filter graph to consider
4501  * @param[out] best_ist input stream where a frame would allow to continue
4502  * @return 0 for success, <0 for error
4503  */
4504 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4505 {
4506  int i, ret;
4507  int nb_requests, nb_requests_max = 0;
4508  InputFilter *ifilter;
4509  InputStream *ist;
4510 
4511  *best_ist = NULL;
4512  ret = avfilter_graph_request_oldest(graph->graph);
4513  if (ret >= 0)
4514  return reap_filters(0);
4515 
4516  if (ret == AVERROR_EOF) {
4517  ret = reap_filters(1);
4518  for (i = 0; i < graph->nb_outputs; i++)
4519  close_output_stream(graph->outputs[i]->ost);
4520  return ret;
4521  }
4522  if (ret != AVERROR(EAGAIN))
4523  return ret;
4524 
4525  for (i = 0; i < graph->nb_inputs; i++) {
4526  ifilter = graph->inputs[i];
4527  ist = ifilter->ist;
4528  if (input_files[ist->file_index]->eagain ||
4529  input_files[ist->file_index]->eof_reached)
4530  continue;
4531  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4532  if (nb_requests > nb_requests_max) {
4533  nb_requests_max = nb_requests;
4534  *best_ist = ist;
4535  }
4536  }
4537 
4538  if (!*best_ist)
4539  for (i = 0; i < graph->nb_outputs; i++)
4540  graph->outputs[i]->ost->unavailable = 1;
4541 
4542  return 0;
4543 }
4544 
4545 /**
4546  * Run a single step of transcoding.
4547  *
4548  * @return 0 for success, <0 for error
4549  */
4550 static int transcode_step(void)
4551 {
4552  OutputStream *ost;
4553  InputStream *ist = NULL;
4554  int ret;
4555 
4556  ost = choose_output();
4557  if (!ost) {
4558  if (got_eagain()) {
4559  reset_eagain();
4560  av_usleep(10000);
4561  return 0;
4562  }
4563  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4564  return AVERROR_EOF;
4565  }
4566 
4567  if (ost->filter && !ost->filter->graph->graph) {
4568  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4569  ret = configure_filtergraph(ost->filter->graph);
4570  if (ret < 0) {
4571  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4572  return ret;
4573  }
4574  }
4575  }
4576 
4577  if (ost->filter && ost->filter->graph->graph) {
4578  /*
4579  * Similar case to the early audio initialization in reap_filters.
4580  * Audio is special in ffmpeg.c currently as we depend on lavfi's
4581  * audio frame buffering/creation to get the output audio frame size
4582  * in samples correct. The audio frame size for the filter chain is
4583  * configured during the output stream initialization.
4584  *
4585  * Apparently avfilter_graph_request_oldest (called in
4586  * transcode_from_filter just down the line) peeks. Peeking already
4587  * puts one frame "ready to be given out", which means that any
4588  * update in filter buffer sink configuration afterwards will not
4589  * help us. And yes, even if it would be utilized,
4590  * av_buffersink_get_samples is affected, as it internally utilizes
4591  * the same early exit for peeked frames.
4592  *
4593  * In other words, if avfilter_graph_request_oldest would not make
4594  * further filter chain configuration or usage of
4595  * av_buffersink_get_samples useless (by just causing the return
4596  * of the peeked AVFrame as-is), we could get rid of this additional
4597  * early encoder initialization.
4598  */
4599  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4600  init_output_stream_wrapper(ost, NULL, 1);
4601 
4602  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4603  return ret;
4604  if (!ist)
4605  return 0;
4606  } else if (ost->filter) {
4607  int i;
4608  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4609  InputFilter *ifilter = ost->filter->graph->inputs[i];
4610  if (!input_files[ifilter->ist->file_index]->eof_reached) {
4611  ist = ifilter->ist;
4612  break;
4613  }
4614  }
4615  if (!ist) {
4616  ost->inputs_done = 1;
4617  return 0;
4618  }
4619  } else {
4620  av_assert0(ost->source_index >= 0);
4621  ist = input_streams[ost->source_index];
4622  }
4623 
4624  ret = process_input(ist->file_index);
4625  if (ret == AVERROR(EAGAIN)) {
4626  if (input_files[ist->file_index]->eagain)
4627  ost->unavailable = 1;
4628  return 0;
4629  }
4630 
4631  if (ret < 0)
4632  return ret == AVERROR_EOF ? 0 : ret;
4633 
4634  return reap_filters(0);
4635 }
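
The audio special case discussed in the comment inside transcode_step() ultimately comes down to telling the buffer sink how many samples the opened encoder wants per frame. A hedged sketch of that coupling (ffmpeg.c does the equivalent while setting up the output stream and its filtergraph; the local names enc and sink are assumptions for the example):

    /* enc: opened audio AVCodecContext, sink: the abuffersink AVFilterContext */
    if (enc->frame_size &&
        !(enc->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
        av_buffersink_set_frame_size(sink, enc->frame_size);
    /* frames pulled from the sink afterwards carry exactly enc->frame_size
     * samples (except possibly the final, shorter one) */
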
4636 
4637 /*
4638  * The following code is the main loop of the file converter
4639  */
4640 static int transcode(void)
4641 {
4642  int ret, i;
4643  AVFormatContext *os;
4644  OutputStream *ost;
4645  InputStream *ist;
4646  int64_t timer_start;
4647  int64_t total_packets_written = 0;
4648 
4649  ret = transcode_init();
4650  if (ret < 0)
4651  goto fail;
4652 
4653  if (stdin_interaction) {
4654  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4655  }
4656 
4657  timer_start = av_gettime_relative();
4658 
4659 #if HAVE_THREADS
4660  if ((ret = init_input_threads()) < 0)
4661  goto fail;
4662 #endif
4663 
4664  while (!received_sigterm) {
4665  int64_t cur_time= av_gettime_relative();
4666 
4667  /* if 'q' pressed, exits */
4668  if (stdin_interaction)
4669  if (check_keyboard_interaction(cur_time) < 0)
4670  break;
4671 
4672  /* check if there's any stream where output is still needed */
4673  if (!need_output()) {
4674  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4675  break;
4676  }
4677 
4678  ret = transcode_step();
4679  if (ret < 0 && ret != AVERROR_EOF) {
4680  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4681  break;
4682  }
4683 
4684  /* dump report by using the output first video and audio streams */
4685  print_report(0, timer_start, cur_time);
4686  }
4687 #if HAVE_THREADS
4688  free_input_threads();
4689 #endif
4690 
4691  /* at the end of stream, we must flush the decoder buffers */
4692  for (i = 0; i < nb_input_streams; i++) {
4693  ist = input_streams[i];
4694  if (!input_files[ist->file_index]->eof_reached) {
4695  process_input_packet(ist, NULL, 0);
4696  }
4697  }
4698  flush_encoders();
4699 
4700  term_exit();
4701 
4702  /* write the trailer if needed */
4703  for (i = 0; i < nb_output_files; i++) {
4704  os = output_files[i]->ctx;
4705  if (!output_files[i]->header_written) {
4706  av_log(NULL, AV_LOG_WARNING,
4707  "Nothing was written into output file %d (%s), because "
4708  "at least one of its streams received no packets.\n",
4709  i, os->url);
4710  continue;
4711  }
4712  if ((ret = av_write_trailer(os)) < 0) {
4713  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4714  if (exit_on_error)
4715  exit_program(1);
4716  }
4717  }
4718 
4719  /* dump report by using the first video and audio streams */
4720  print_report(1, timer_start, av_gettime_relative());
4721 
4722  /* close the output files */
4723  for (i = 0; i < nb_output_files; i++) {
4724  os = output_files[i]->ctx;
4725  if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
4726  if ((ret = avio_closep(&os->pb)) < 0) {
4727  av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
4728  if (exit_on_error)
4729  exit_program(1);
4730  }
4731  }
4732  }
4733 
4734  /* close each encoder */
4735  for (i = 0; i < nb_output_streams; i++) {
4736  ost = output_streams[i];
4737  if (ost->encoding_needed) {
4738  avcodec_close(ost->enc_ctx);
4739  }
4740  total_packets_written += ost->packets_written;
4741  if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4742  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4743  exit_program(1);
4744  }
4745  }
4746 
4747  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4748  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4749  exit_program(1);
4750  }
4751 
4752  /* close each decoder */
4753  for (i = 0; i < nb_input_streams; i++) {
4754  ist = input_streams[i];
4755  if (ist->decoding_needed) {
4756  avcodec_close(ist->dec_ctx);
4757  if (ist->hwaccel_uninit)
4758  ist->hwaccel_uninit(ist->dec_ctx);
4759  }
4760  }
4761 
4762  hw_device_free_all();
4763 
4764  /* finished ! */
4765  ret = 0;
4766 
4767  fail:
4768 #if HAVE_THREADS
4769  free_input_threads();
4770 #endif
4771 
4772  if (output_streams) {
4773  for (i = 0; i < nb_output_streams; i++) {
4774  ost = output_streams[i];
4775  if (ost) {
4776  if (ost->logfile) {
4777  if (fclose(ost->logfile))
4778  av_log(NULL, AV_LOG_ERROR,
4779  "Error closing logfile, loss of information possible: %s\n",
4780  av_err2str(AVERROR(errno)));
4781  ost->logfile = NULL;
4782  }
4783  av_freep(&ost->forced_kf_pts);
4784  av_freep(&ost->apad);
4785  av_freep(&ost->disposition);
4786  av_dict_free(&ost->encoder_opts);
4787  av_dict_free(&ost->sws_dict);
4788  av_dict_free(&ost->swr_opts);
4789  }
4790  }
4791  }
4792  return ret;
4793 }
4794 
4795 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4796 {
4797  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4798 #if HAVE_GETRUSAGE
4799  struct rusage rusage;
4800 
4801  getrusage(RUSAGE_SELF, &rusage);
4802  time_stamps.user_usec =
4803  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4804  time_stamps.sys_usec =
4805  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4806 #elif HAVE_GETPROCESSTIMES
4807  HANDLE proc;
4808  FILETIME c, e, k, u;
4809  proc = GetCurrentProcess();
4810  GetProcessTimes(proc, &c, &e, &k, &u);
4811  time_stamps.user_usec =
4812  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4813  time_stamps.sys_usec =
4814  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4815 #else
4816  time_stamps.user_usec = time_stamps.sys_usec = 0;
4817 #endif
4818  return time_stamps;
4819 }
4820 
4821 static int64_t getmaxrss(void)
4822 {
4823 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4824  struct rusage rusage;
4825  getrusage(RUSAGE_SELF, &rusage);
4826  return (int64_t)rusage.ru_maxrss * 1024;
4827 #elif HAVE_GETPROCESSMEMORYINFO
4828  HANDLE proc;
4829  PROCESS_MEMORY_COUNTERS memcounters;
4830  proc = GetCurrentProcess();
4831  memcounters.cb = sizeof(memcounters);
4832  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4833  return memcounters.PeakPagefileUsage;
4834 #else
4835  return 0;
4836 #endif
4837 }
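
get_benchmark_time_stamps() and getmaxrss() back the -benchmark report printed from main(). A minimal usage sketch (stdio output chosen here purely for the illustration; needs <stdio.h> and <inttypes.h>):

    BenchmarkTimeStamps t0 = get_benchmark_time_stamps();
    /* ... run the transcode ... */
    BenchmarkTimeStamps t1 = get_benchmark_time_stamps();
    printf("bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs maxrss=%"PRId64"kB\n",
           (t1.user_usec - t0.user_usec) / 1000000.0,
           (t1.sys_usec  - t0.sys_usec ) / 1000000.0,
           (t1.real_usec - t0.real_usec) / 1000000.0,
           getmaxrss() / 1024);
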
4838 
4839 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4840 {
4841 }
4842 
4843 int main(int argc, char **argv)
4844 {
4845  int i, ret;
4846  BenchmarkTimeStamps ti;
4847 
4848  init_dynload();
4849 
4850  register_exit(ffmpeg_cleanup);
4851 
4852  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4853 
4854  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4855  parse_loglevel(argc, argv, options);
4856 
4857  if(argc>1 && !strcmp(argv[1], "-d")){
4858  run_as_daemon=1;
4859  av_log_set_callback(log_callback_null);
4860  argc--;
4861  argv++;
4862  }
4863 
4864 #if CONFIG_AVDEVICE
4865  avdevice_register_all();
4866 #endif
4867  avformat_network_init();
4868 
4869  show_banner(argc, argv, options);
4870 
4871  /* parse options and open all input/output files */
4872  ret = ffmpeg_parse_options(argc, argv);
4873  if (ret < 0)
4874  exit_program(1);
4875 
4876  if (nb_output_files <= 0 && nb_input_files == 0) {
4877  show_usage();
4878  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4879  exit_program(1);
4880  }
4881 
4882  /* file converter / grab */
4883  if (nb_output_files <= 0) {
4884  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4885  exit_program(1);
4886  }
4887 
4888  for (i = 0; i < nb_output_files; i++) {
4889  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4890  want_sdp = 0;
4891  }
4892 
4893  current_time = ti = get_benchmark_time_stamps();
4894  if (transcode() < 0)
4895  exit_program(1);
4896  if (do_benchmark) {
4897  int64_t utime, stime, rtime;
4898  current_time = get_benchmark_time_stamps();
4899  utime = current_time.user_usec - ti.user_usec;
4900  stime = current_time.sys_usec - ti.sys_usec;
4901  rtime = current_time.real_usec - ti.real_usec;
4902  av_log(NULL, AV_LOG_INFO,
4903  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4904  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4905  }
4906  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4907  decode_error_stat[0], decode_error_stat[1]);
4908  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4909  exit_program(69);
4910 
4911  exit_program(received_nb_signals ? 255 : main_return_code);
4912  return main_return_code;
4913 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:192
AVSubtitle
Definition: avcodec.h:2289
avcodec_close
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: avcodec.c:444
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:143
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1012
ost
OutputStream * ost
Definition: ffmpeg_filter.c:160
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:250
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:424
AVCodec
AVCodec.
Definition: codec.h:202
FF_ENABLE_DEPRECATION_WARNINGS
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
OutputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:545
pthread_join
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:739
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
ifilter_parameters_from_codecpar
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1886
need_output
static int need_output(void)
Definition: ffmpeg.c:3774
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:149
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:2005
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:1415
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:74
level
uint8_t level
Definition: svq3.c:204
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:426
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:265
av_clip
#define av_clip
Definition: common.h:96
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:436
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:4116
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:388
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:54
seek_to_start
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4145
AVOutputFormat::name
const char * name
Definition: avformat.h:504
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:151
opt.h
OutputStream::attachment_filename
const char * attachment_filename
Definition: ffmpeg.h:536
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:973
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:346
OutputStream::frame_number
int frame_number
Definition: ffmpeg.h:457
OutputStream::enc
AVCodecContext * enc
Definition: muxing.c:56
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
forced_keyframes_const_names
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:42
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1043
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:960
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:146
libm.h
InputFilter::width
int width
Definition: ffmpeg.h:249
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1129
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:234
InputFilter::displaymatrix
int32_t * displaymatrix
Definition: ffmpeg.h:257
OutputStream::last_dropped
int last_dropped
Definition: ffmpeg.h:480
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:243
init_output_stream
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:3433
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:992
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:435
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:68
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:617
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:288
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1418
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:386
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:264
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
OutputStream::enc_ctx
AVCodecContext * enc_ctx
Definition: ffmpeg.h:473
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:91
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2660
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:478
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:296
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:873
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:997
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:140
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:281
InputFile::nb_streams_warn
int nb_streams_warn
Definition: ffmpeg.h:416
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:90
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:674
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:151
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:311
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:475
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
AVCodecContext::coded_side_data
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:1833
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4550
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:125
AVSubtitleRect
Definition: avcodec.h:2261
OutputStream::packets_written
uint64_t packets_written
Definition: ffmpeg.h:547
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2293
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:109
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:145
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:581
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:217
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:317
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AV_THREAD_MESSAGE_NONBLOCK
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1268
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:424
index
fg index
Definition: ffmpeg_filter.c:167
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:953
AVFrame::width
int width
Definition: frame.h:389
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:526
AVPacketSideData
Definition: packet.h:314
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:221
w
uint8_t w
Definition: llviddspenc.c:38
OutputStream::rotate_overridden
int rotate_overridden
Definition: ffmpeg.h:492
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:805
FKF_T
@ FKF_T
Definition: ffmpeg.h:437
AVPacket::data
uint8_t * data
Definition: packet.h:373
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:143
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
finish_output_stream
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1463
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:989
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:474
AVOption
AVOption.
Definition: opt.h:247
OutputStream::index
int index
Definition: ffmpeg.h:453
ist
ifilter ist
Definition: ffmpeg_filter.c:177
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:40
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:146
FilterGraph::index
int index
Definition: ffmpeg.h:287
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:1015
nb_output_dumped
static unsigned nb_output_dumped
Definition: ffmpeg.c:139
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:367
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AVFrame::pkt_duration
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:601
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
OutputStream::sws_dict
AVDictionary * sws_dict
Definition: ffmpeg.h:522
avcodec_parameters_free
void avcodec_parameters_free(AVCodecParameters **ppar)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: codec_par.c:61
AVCodecContext::subtitle_header
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:1683
transcode
static int transcode(void)
Definition: ffmpeg.c:4640
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:126
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:144
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:3385
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:1001
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:297
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:50
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:391
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
max
#define max(a, b)
Definition: cuda_runtime.h:33
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:247
check_recording_time
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:920
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1412
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2300
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:281
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1899
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:352
OutputStream::max_frames
int64_t max_frames
Definition: ffmpeg.h:476
OutputStream::rotate_override_value
double rotate_override_value
Definition: ffmpeg.h:495
tf_sess_config.config
config
Definition: tf_sess_config.py:33
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:428
sample_rate
sample_rate
Definition: ffmpeg_filter.c:153
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:163
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:329
AVBSFContext
The bitstream filter state.
Definition: bsf.h:47
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:3274
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:157
want_sdp
static int want_sdp
Definition: ffmpeg.c:141
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:121
InputFilter::channel_layout
uint64_t channel_layout
Definition: ffmpeg.h:254
OutputStream::forced_keyframes_pexpr
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:505
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:239
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:401
OutputStream::encoding_needed
int encoding_needed
Definition: ffmpeg.h:456
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:128
InputStream
Definition: ffmpeg.h:302
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:166
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1710
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:159
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:355
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:4107
AVPacketSideData::size
size_t size
Definition: packet.h:316
OutputStream::forced_kf_pts
int64_t * forced_kf_pts
Definition: ffmpeg.h:501
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2232
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1467
stats_period
int64_t stats_period
Definition: ffmpeg_opt.c:170
nb_frames_drop
static int nb_frames_drop
Definition: ffmpeg.c:137
OutputStream::vsync_method
enum VideoSyncMethod vsync_method
Definition: ffmpeg.h:488
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1087
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:198
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:145
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
fifo.h
graph
ofilter graph
Definition: ffmpeg_filter.c:171
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:39
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1246
finish
static void finish(void)
Definition: movenc.c:342
OutputStream::filters
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:518
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:168
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:392
OutputStream::initialized
int initialized
Definition: ffmpeg.h:532
OutputStream::disposition
char * disposition
Definition: ffmpeg.h:539
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:127
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:242
OutputStream::logfile_prefix
char * logfile_prefix
Definition: ffmpeg.h:513
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:66
samplefmt.h
OutputStream::forced_keyframes_expr_const_values
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:506
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1127
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:504
check_init_output_file
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2944
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:463
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:409
AVChapter
Definition: avformat.h:1159
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:177
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1388
pts
static int64_t pts
Definition: transcode_aac.c:653
av_thread_message_queue_recv
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
Definition: threadmessage.c:172
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:317
InputFilter::frame_queue
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:244
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:278
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:424
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:985
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
AV_FIELD_TB
@ AV_FIELD_TB
Definition: codec_par.h:41
OutputFile::opts
AVDictionary * opts
Definition: ffmpeg.h:578
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
loop
static int loop
Definition: ffplay.c:339
do_pkt_dump
int do_pkt_dump
Definition: ffmpeg_opt.c:155
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:1302
InputFile
Definition: ffmpeg.h:399
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:260
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: macros.h:45
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3027
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:425
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:72
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:523
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:515
OutputStream::copy_initial_nonkeyframes
int copy_initial_nonkeyframes
Definition: ffmpeg.h:537
OutputFile::shortest
int shortest
Definition: ffmpeg.h:584
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
OutputStream::last_nb0_frames
int last_nb0_frames[3]
Definition: ffmpeg.h:481
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:322
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:946
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
OutputStream::sync_opts
int64_t sync_opts
Definition: ffmpeg.h:461
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:442
AV_PKT_FLAG_CORRUPT
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:429
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:621
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:156
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3804
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:124
media_type_string
#define media_type_string
Definition: cmdutils.h:642
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1661
AVCodecContext::extradata_size
int extradata_size
Definition: avcodec.h:485
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:64
OutputStream::forced_keyframes
char * forced_keyframes
Definition: ffmpeg.h:504
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:55
input_streams
InputStream ** input_streams
Definition: ffmpeg.c:148
llrintf
#define llrintf(x)
Definition: libm.h:399
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecContext::stats_in
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1244
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:449
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4795
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:298
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:141
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2136
OutputStream::frame_aspect_ratio
AVRational frame_aspect_ratio
Definition: ffmpeg.h:497
copy_ts_first_pts
static int64_t copy_ts_first_pts
Definition: ffmpeg.c:348
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:871
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:224
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:628
OutputStream::mux_timebase
AVRational mux_timebase
Definition: ffmpeg.h:468
AVFormatContext::chapters
AVChapter ** chapters
Definition: avformat.h:1419
AVDictionaryEntry::key
char * key
Definition: dict.h:80
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:447
frame_size
int frame_size
Definition: mxfenc.c:2199
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: codec_id.h:218
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputStream::audio_channels_mapped
int audio_channels_mapped
Definition: ffmpeg.h:511
InputFilter
Definition: ffmpeg.h:237
OutputStream::copy_prior_start
int copy_prior_start
Definition: ffmpeg.h:538
get_input_stream
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2931
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:642
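For orientation, a minimal sketch of the send/receive decoding loop built around avcodec_send_packet() and avcodec_receive_frame(); it assumes avctx is an already opened decoder, frame is allocated by the caller, and pkt may be NULL to flush at end of stream. Error handling is reduced to the essentials:
#include "libavcodec/avcodec.h"
#include "libavutil/frame.h"
static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            /* decoder needs more input / fully flushed */
        if (ret < 0)
            return ret;          /* a real decoding error */
        /* ... use the decoded frame here ... */
        av_frame_unref(frame);
    }
    return 0;
}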
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:408
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:2022
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:622
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:264
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1425
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AVPacketSideData::data
uint8_t * data
Definition: packet.h:315
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:448
OutputStream::avfilter
char * avfilter
Definition: ffmpeg.h:517
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:366
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:422
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:141
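A short sketch of av_rescale_q() converting a count between two time bases without intermediate overflow; the time bases and value are example numbers:
#include <stdio.h>
#include <inttypes.h>
#include "libavutil/mathematics.h"
int main(void)
{
    AVRational src_tb = { 1, 25 };     /* e.g. a 25 fps frame counter */
    AVRational dst_tb = { 1, 90000 };  /* e.g. a 90 kHz mux time base */
    int64_t ts = 50;                   /* 2 seconds in src_tb */
    printf("%" PRId64 "\n", av_rescale_q(ts, src_tb, dst_tb));  /* 180000 */
    return 0;
}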
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:165
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2295
abort_codec_experimental
static void abort_codec_experimental(const AVCodec *c, int encoder)
Definition: ffmpeg.c:692
OutputFile::header_written
int header_written
Definition: ffmpeg.h:586
on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:337
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:529
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
compare_int64
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2938
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:490
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:240
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1377
key
const char * key
Definition: hwcontext_opencl.c:168
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
OutputStream::last_frame
AVFrame * last_frame
Definition: ffmpeg.h:478
av_fifo_realloc2
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AV_FIELD_BT
@ AV_FIELD_BT
Definition: codec_par.h:42
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:683
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:77
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2578
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:167
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:906
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:4211
avformat_write_header
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:472
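A rough sketch of the muxing call order, assuming oc is a fully prepared AVFormatContext (streams added, pb opened where required) and next_packet() is a hypothetical helper that yields packets already in the destination stream time base:
#include "libavformat/avformat.h"
extern AVPacket *next_packet(void);   /* hypothetical packet source */
static int mux_all(AVFormatContext *oc)
{
    AVPacket *pkt;
    int ret = avformat_write_header(oc, NULL);
    if (ret < 0)
        return ret;
    while ((pkt = next_packet())) {
        ret = av_interleaved_write_frame(oc, pkt);
        av_packet_free(&pkt);
        if (ret < 0)
            return ret;
    }
    return av_write_trailer(oc);
}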
init_output_stream_wrapper
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
Definition: ffmpeg.c:980
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:393
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1666
arg
const char * arg
Definition: jacosubdec.c:67
pthread_create
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:65
avio_flush
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:252
AVCodecParserContext::repeat_pict
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:2793
output_streams
OutputStream ** output_streams
Definition: ffmpeg.c:153
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4504
OutputStream::encoder_opts
AVDictionary * encoder_opts
Definition: ffmpeg.h:521
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
AVFormatContext
Format I/O context.
Definition: avformat.h:1200
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:112
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1095
run_as_daemon
static int run_as_daemon
Definition: ffmpeg.c:134
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1559
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:266
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:965
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:593
NULL
#define NULL
Definition: coverity.c:32
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:967
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:304
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4843
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:697
avio_print
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:541
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2282
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2360
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:156
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVCodecContext::nb_coded_side_data
int nb_coded_side_data
Definition: avcodec.h:1834
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4821
AVCodecContext::bit_rate
int64_t bit_rate
the average bitrate
Definition: avcodec.h:433
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: packet.h:317
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:428
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3843
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVCodecContext::subtitle_header_size
int subtitle_header_size
Definition: avcodec.h:1684
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1242
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
src
#define src
Definition: vp8dsp.c:255
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
parseutils.h
OutputStream::audio_channels_map
int * audio_channels_map
Definition: ffmpeg.h:510
InputFilter::channels
int channels
Definition: ffmpeg.h:253
mathops.h
duration_max
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4126
OutputStream::frame_rate
AVRational frame_rate
Definition: ffmpeg.h:486
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1128
main_return_code
static int main_return_code
Definition: ffmpeg.c:347
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:112
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:1006
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:259
OutputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:491
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:134
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3592
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1284
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2817
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:74
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:137
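A minimal sketch of opening a decoder for one stream of an already opened input: copy the demuxed stream parameters into a fresh codec context with avcodec_parameters_to_context(), then call avcodec_open2(). Error handling is deliberately terse:
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
static AVCodecContext *open_decoder(AVFormatContext *ic, int stream_index)
{
    AVStream *st = ic->streams[stream_index];
    const AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *c;
    if (!dec)
        return NULL;
    c = avcodec_alloc_context3(dec);
    if (!c)
        return NULL;
    /* copy demuxed stream parameters, then open the decoder */
    if (avcodec_parameters_to_context(c, st->codecpar) < 0 ||
        avcodec_open2(c, dec, NULL) < 0) {
        avcodec_free_context(&c);
        return NULL;
    }
    return c;
}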
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:223
time.h
close_all_output_streams
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:718
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:344
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:153
nb_input_streams
int nb_input_streams
Definition: ffmpeg.c:149
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:432
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:481
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:299
OutputStream::logfile
FILE * logfile
Definition: ffmpeg.h:514
swresample.h
c
Definition: undefined.txt:32
InputStream::sub2video::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:361
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:170
input_files
InputFile ** input_files
Definition: ffmpeg.c:150
AVStream::nb_frames
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:987
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:320
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:586
FilterGraph
Definition: ffmpeg.h:286
print_stats
int print_stats
Definition: ffmpeg_opt.c:162
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1256
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:238
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:523
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:508
print_sdp
static int print_sdp(void)
Definition: ffmpeg.c:2764
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:506
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:232
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1308
options
const OptionDef options[]
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1236
OutputStream::force_fps
int force_fps
Definition: ffmpeg.h:490
OutputStream::filter
OutputFilter * filter
Definition: ffmpeg.h:516
AVIOContext
Bytestream IO Context.
Definition: avio.h:161
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:414
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:313
AVPacket::size
int size
Definition: packet.h:374
InputStream::start
int64_t start
Definition: ffmpeg.h:317
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:49
threadmessage.h
OutputStream::error
int64_t error[4]
Definition: ffmpeg.h:573
InputStream::file_index
int file_index
Definition: ffmpeg.h:303
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
Definition: ffmpeg.c:1142
OutputStream::forced_kf_count
int forced_kf_count
Definition: ffmpeg.h:502
output_files
OutputFile ** output_files
Definition: ffmpeg.c:155
SIGNAL
#define SIGNAL(sig, func)
Definition: ffmpeg.c:404
parse_forced_key_frames
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3187
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:453
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:343
start_time
static int64_t start_time
Definition: ffplay.c:330
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:290
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1283
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1000
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2562
InputStream::got_output
int got_output
Definition: ffmpeg.h:350
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:83
size
int size
Definition: twinvq_data.h:10344
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:156
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
subtitle_out
static uint8_t * subtitle_out
Definition: ffmpeg.c:146
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:158
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1204
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:656
InputStream::prev_sub
struct InputStream::@2 prev_sub
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrameSideData::data
uint8_t * data
Definition: frame.h:225
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:542
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:1997
OutputStream::source_index
int source_index
Definition: ffmpeg.h:454
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:308
sub2video_update
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:241
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:464
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:404
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:1004
avdevice.h
AVFMT_NOSTREAMS
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:480
OSTFinished
OSTFinished
Definition: ffmpeg.h:446
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:691
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:372
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:232
OutputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:263
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:235
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:377
a
Definition: undefined.txt:41
OutputFile::limit_filesize
uint64_t limit_filesize
Definition: ffmpeg.h:582
dup_warning
static unsigned dup_warning
Definition: ffmpeg.c:136
av_sdp_create
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:905
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:379
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:487
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:152
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:528
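A sketch of the timestamp step in a stream copy: av_packet_rescale_ts() converts pts/dts/duration from the input to the output stream time base before the packet is written. ist/ost are assumed to be matching input and output streams:
#include "libavformat/avformat.h"
static int copy_packet(AVFormatContext *oc, AVPacket *pkt,
                       const AVStream *ist, const AVStream *ost)
{
    av_packet_rescale_ts(pkt, ist->time_base, ost->time_base);
    pkt->stream_index = ost->index;
    pkt->pos = -1;                    /* byte position is unknown after remuxing */
    return av_interleaved_write_frame(oc, pkt);
}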
OutputStream::pict_type
int pict_type
Definition: ffmpeg.h:570
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2244
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
av_thread_message_queue_alloc
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:993
decode
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2259
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:216
guess_input_channel_layout
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2097
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1185
OutputStream::first_pts
int64_t first_pts
Definition: ffmpeg.h:464
write_packet
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:727
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:1058
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:83
avcodec_parameters_copy
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: codec_par.c:72
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:123
do_video_stats
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1420
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:443
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:40
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:63
OutputStream::max_frame_rate
AVRational max_frame_rate
Definition: ffmpeg.h:487
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
OutputStream::filters_script
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:519
OutputStream::pkt
AVPacket * pkt
Definition: ffmpeg.h:479
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:579
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1285
av_write_trailer
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1243
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
OutputStream::apad
char * apad
Definition: ffmpeg.h:524
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:397
bprint.h
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:309
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:226
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:271
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:366
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:547
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
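For packed (interleaved) sample formats the buffer size follows directly from av_get_bytes_per_sample(); a tiny sketch with illustrative numbers (planar layouts are better served by av_samples_get_buffer_size()):
#include "libavutil/samplefmt.h"
static int packed_buffer_size(enum AVSampleFormat fmt, int nb_samples, int channels)
{
    /* samples * channels * bytes per sample, valid for packed formats only */
    return nb_samples * channels * av_get_bytes_per_sample(fmt);
}
/* e.g. packed_buffer_size(AV_SAMPLE_FMT_S16, 1024, 2) == 4096 */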
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:253
FFMIN3
#define FFMIN3(a, b, c)
Definition: macros.h:50
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:313
internal.h
AVCodecParameters::height
int height
Definition: codec_par.h:127
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:147
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: codec_par.h:177
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:165
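A sketch of one filtering step: push a decoded frame into the buffer source with av_buffersrc_add_frame_flags() and drain whatever the buffer sink has ready. src_ctx and sink_ctx are assumed to be the buffer/buffersink instances of an already configured graph, and in/out are caller-allocated frames:
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"
static int filter_one(AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
                      AVFrame *in, AVFrame *out)
{
    /* keep our own reference so the caller may still reuse "in" */
    int ret = av_buffersrc_add_frame_flags(src_ctx, in, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame(sink_ctx, out)) >= 0) {
        /* ... consume the filtered frame ... */
        av_frame_unref(out);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}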
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:53
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
av_thread_message_queue_set_err_send
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
Definition: threadmessage.c:188
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
OutputStream::sync_ist
struct InputStream * sync_ist
Definition: ffmpeg.h:460
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:160
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:351
OutputFile::ost_index
int ost_index
Definition: ffmpeg.h:579
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:430
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:400
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:462
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:435
av_get_audio_frame_duration
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:806
transcode_subtitles
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2485
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:263
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1124
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:209
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
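A small example of rational arithmetic with av_make_q() and av_inv_q(): the duration of one frame is the inverse of the frame rate. The NTSC rate below is illustrative:
#include <stdio.h>
#include "libavutil/rational.h"
int main(void)
{
    AVRational frame_rate = av_make_q(30000, 1001);   /* 29.97 fps */
    AVRational frame_dur  = av_inv_q(frame_rate);     /* seconds per frame */
    printf("%d/%d s per frame (%.6f s)\n",
           frame_dur.num, frame_dur.den, av_q2d(frame_dur));
    return 0;
}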
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:974
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:158
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:128
OutputStream::muxing_queue
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:558
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:513
AVCodecContext::height
int height
Definition: avcodec.h:556
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:355
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:593
OutputStream::ref_par
AVCodecParameters * ref_par
Definition: ffmpeg.h:474
nb_output_files
int nb_output_files
Definition: ffmpeg.c:156
limit
static double limit(double x)
Definition: vf_pseudocolor.c:128
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:497
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFMT_TS_NONSTRICT
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:485
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
ofilter
OutputFilter * ofilter
Definition: ffmpeg_filter.c:162
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:96
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:949
AVFMT_FLAG_BITEXACT
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1335
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:935
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
OutputStream::max_muxing_queue_size
int max_muxing_queue_size
Definition: ffmpeg.h:555
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:161
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:379
output_packet
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:894
frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:241
AVFormatContext::oformat
const struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1219
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
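A short sketch of the bounded string helpers av_strlcpy()/av_strlcat(): both always NUL-terminate, never write past the given size, and return the length of the string they attempted to create, so truncation can be detected. The strings here are examples only:
#include <stdio.h>
#include "libavutil/avstring.h"
int main(void)
{
    char buf[16];
    av_strlcpy(buf, "stream #", sizeof(buf));
    av_strlcat(buf, "0:1 (video)", sizeof(buf));
    printf("%s\n", buf);   /* truncated to 15 characters plus the terminator */
    return 0;
}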
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:56
normalize.ifile
ifile
Definition: normalize.py:6
sdp_filename
char * sdp_filename
Definition: ffmpeg_opt.c:142
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
OutputStream::forced_kf_ref_pts
int64_t forced_kf_ref_pts
Definition: ffmpeg.h:500
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:369
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:288
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:402
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:933
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:93
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:419
InputFile::ist_index
int ist_index
Definition: ffmpeg.h:403
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:370
ifilter
InputFilter * ifilter
Definition: ffmpeg_filter.c:163
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:198
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:67
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:252
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:1314
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1148
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:71
AVCodecContext
main external API structure.
Definition: avcodec.h:383
AVFrame::height
int height
Definition: frame.h:389
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:943
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:345
BenchmarkTimeStamps
Definition: ffmpeg.c:123
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:1939
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:445
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
InputFilter::format
int format
Definition: ffmpeg.h:247
OutputStream::finished
OSTFinished finished
Definition: ffmpeg.h:525
report_new_stream
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3577
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:232
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
av_stream_get_end_pts
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:87
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2117
avfilter.h
FilterGraph::is_meta
int is_meta
Definition: ffmpeg.h:294
avformat_free_context
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:688
OutputStream::last_mux_dts
int64_t last_mux_dts
Definition: ffmpeg.h:466
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputStream::quality
int quality
Definition: ffmpeg.h:553
OutputStream::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:494
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1084
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
InputFilter::height
int height
Definition: ffmpeg.h:249
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:82
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:375
video_sync_method
enum VideoSyncMethod video_sync_method
Definition: ffmpeg_opt.c:150
OutputStream::frames_encoded
uint64_t frames_encoded
Definition: ffmpeg.h:549
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:855
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:410
InputStream::discard
int discard
Definition: ffmpeg.h:305
AVFilterContext
An instance of a filter.
Definition: avfilter.h:402
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1302
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1676
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:121
VSYNC_AUTO
@ VSYNC_AUTO
Definition: ffmpeg.h:51
OutputFilter
Definition: ffmpeg.h:262
nb_frames_dup
static int nb_frames_dup
Definition: ffmpeg.c:135
FF_DISABLE_DEPRECATION_WARNINGS
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:82
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:359
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:391
desc
const char * desc
Definition: libsvtav1.c:79
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:264
start_at_zero
int start_at_zero
Definition: ffmpeg_opt.c:157
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:477
OutputStream::file_index
int file_index
Definition: ffmpeg.h:452
audio_volume
int audio_volume
Definition: ffmpeg_opt.c:148
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:272
OutputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:577
OutputFilter::out_tmp
AVFilterInOut * out_tmp
Definition: ffmpeg.h:269
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:231
av_fifo_size
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:38
llrint
#define llrint(x)
Definition: libm.h:394
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3146
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:223
AVCodecParameters::format
int format
Definition: codec_par.h:84
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:489
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:256
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
Definition: ffmpeg.c:2147
av_fifo_freep
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:434
AVDictionaryEntry
Definition: dict.h:79
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:357
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:164
OutputStream::muxing_queue_data_size
size_t muxing_queue_data_size
Definition: ffmpeg.h:564
do_hex_dump
int do_hex_dump
Definition: ffmpeg_opt.c:154
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
AVPacket
This structure stores compressed data.
Definition: packet.h:350
av_thread_message_queue_free
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
av_interleaved_write_frame
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1228
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
init_input_stream
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2866
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:393
get_input_packet
static int get_input_packet(InputFile *f, AVPacket **pkt)
Definition: ffmpeg.c:4078
AVCodecParameters::channel_layout
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:48
FFMAX3
#define FFMAX3(a, b, c)
Definition: macros.h:48
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, size_t size)
Allocate new information from stream.
Definition: utils.c:1772
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:159
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:556
int32_t
int32_t
Definition: audioconvert.c:56
av_fifo_alloc
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:157
OutputStream::muxing_queue_data_threshold
size_t muxing_queue_data_threshold
Definition: ffmpeg.h:567
imgutils.h
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:132
timestamp.h
OutputStream
Definition: muxing.c:54
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
OutputStream::st
AVStream * st
Definition: muxing.c:55
av_thread_message_queue_set_err_recv
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
Definition: threadmessage.c:199
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1174
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
codec_flags
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:38
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:146
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_stream_get_parser
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:95
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: codec.h:460
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
av_pkt_dump_log2
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:116
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3521
OutputStream::stream_copy
int stream_copy
Definition: ffmpeg.h:527
AVDictionaryEntry::value
char * value
Definition: dict.h:81
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:975
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3831
avstring.h
OutputStream::swr_opts
AVDictionary * swr_opts
Definition: ffmpeg.h:523
OutputStream::forced_kf_index
int forced_kf_index
Definition: ffmpeg.h:503
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
OutputStream::filtered_frame
AVFrame * filtered_frame
Definition: ffmpeg.h:477
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:414
FKF_N
@ FKF_N
Definition: ffmpeg.h:433
log_callback_null
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4839
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:580
OutputStream::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg.h:507
OutputStream::samples_encoded
uint64_t samples_encoded
Definition: ffmpeg.h:550
OutputStream::inputs_done
int inputs_done
Definition: ffmpeg.h:534
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
InputStream::dec
const AVCodec * dec
Definition: ffmpeg.h:312
snprintf
#define snprintf
Definition: snprintf.h:34
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:441
read_key
static int read_key(void)
Definition: ffmpeg.c:457
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1482
buffersrc.h
OutputStream::bsf_ctx
AVBSFContext * bsf_ctx
Definition: ffmpeg.h:471
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:352
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:753
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:133
init_output_bsfs
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2998
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1178
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3250
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
dec_ctx
static AVCodecContext * dec_ctx
Definition: filtering_audio.c:44
nb_output_streams
int nb_output_streams
Definition: ffmpeg.c:154
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:231
OutputFile
Definition: ffmpeg.h:576
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:64
OutputStream::enc_timebase
AVRational enc_timebase
Definition: ffmpeg.h:469