FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavcodec/version.h"
68 #include "libavformat/os_support.h"
69 
70 # include "libavfilter/avfilter.h"
71 # include "libavfilter/buffersrc.h"
72 # include "libavfilter/buffersink.h"
73 
74 #if HAVE_SYS_RESOURCE_H
75 #include <sys/time.h>
76 #include <sys/types.h>
77 #include <sys/resource.h>
78 #elif HAVE_GETPROCESSTIMES
79 #include <windows.h>
80 #endif
81 #if HAVE_GETPROCESSMEMORYINFO
82 #include <windows.h>
83 #include <psapi.h>
84 #endif
85 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <windows.h>
87 #endif
88 
89 
90 #if HAVE_SYS_SELECT_H
91 #include <sys/select.h>
92 #endif
93 
94 #if HAVE_TERMIOS_H
95 #include <fcntl.h>
96 #include <sys/ioctl.h>
97 #include <sys/time.h>
98 #include <termios.h>
99 #elif HAVE_KBHIT
100 #include <conio.h>
101 #endif
102 
103 #include <time.h>
104 
105 #include "ffmpeg.h"
106 #include "cmdutils.h"
107 
108 #include "libavutil/avassert.h"
109 
110 const char program_name[] = "ffmpeg";
111 const int program_birth_year = 2000;
112 
113 static FILE *vstats_file;
114 
115 const char *const forced_keyframes_const_names[] = {
116  "n",
117  "n_forced",
118  "prev_forced_n",
119  "prev_forced_t",
120  "t",
121  NULL
122 };
123 
124 typedef struct BenchmarkTimeStamps {
125  int64_t real_usec;
126  int64_t user_usec;
127  int64_t sys_usec;
129 
131 static int64_t getmaxrss(void);
133 
134 static int64_t nb_frames_dup = 0;
135 static uint64_t dup_warning = 1000;
136 static int64_t nb_frames_drop = 0;
137 static int64_t decode_error_stat[2];
138 unsigned nb_output_dumped = 0;
139 
140 int want_sdp = 1;
141 
144 
145 static uint8_t *subtitle_out;
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
186  return ret;
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas frame, stamped with the given pts,
 * into every filtergraph input fed by this input stream. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    /* remember what was pushed last so heartbeats don't re-send it */
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        /* NOTE(review): the buffer-source call (presumably
           av_buffersrc_add_frame_flags) that sets 'ret' was lost in
           extraction — confirm against the original file */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
/* Render the given subtitle (or a blank canvas when sub == NULL) onto the
 * stream's sub2video frame and push it into the filtergraph(s).
 * heartbeat_pts is used as the start time only when the system is being
 * initialized and no subtitle is available yet. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    /* NOTE(review): 'dst' is declared int8_t* but receives frame->data[0]
       (uint8_t*) — likely a transcription artifact; upstream uses uint8_t* */
    int8_t *dst;
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;              /* not a sub2video stream */
    if (sub) {
        /* convert the millisecond display offsets into stream time base */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* NOTE(review): the av_log(...) call opening was lost in
           extraction; only its message argument remains below */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    /* blit every rectangle of the subtitle onto the blank canvas */
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
279 
280 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
281 {
282  InputFile *infile = input_files[ist->file_index];
283  int i, j, nb_reqs;
284  int64_t pts2;
285 
286  /* When a frame is read from a file, examine all sub2video streams in
287  the same file and send the sub2video frame again. Otherwise, decoded
288  video frames could be accumulating in the filter graph while a filter
289  (possibly overlay) is desperately waiting for a subtitle frame. */
290  for (i = 0; i < infile->nb_streams; i++) {
291  InputStream *ist2 = input_streams[infile->ist_index + i];
292  if (!ist2->sub2video.frame)
293  continue;
294  /* subtitles seem to be usually muxed ahead of other streams;
295  if not, subtracting a larger time here is necessary */
296  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
297  /* do not send the heartbeat frame if the subtitle is already ahead */
298  if (pts2 <= ist2->sub2video.last_pts)
299  continue;
300  if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
301  /* if we have hit the end of the current displayed subpicture,
302  or if we need to initialize the system, update the
303  overlayed subpicture and its start/end times */
304  sub2video_update(ist2, pts2 + 1, NULL);
305  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
306  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
307  if (nb_reqs)
308  sub2video_push_ref(ist2, pts2);
309  }
310 }
311 
/* End-of-stream handling for a sub2video stream: extend the last
 * subpicture to INT64_MAX, then flush every attached buffer source. */
static void sub2video_flush(InputStream *ist)
{
    int i;
    int ret;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);
    for (i = 0; i < ist->nb_filters; i++) {
        /* NOTE(review): the flush call (presumably av_buffersrc_add_frame
           with NULL) that sets 'ret' was lost in extraction */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
325 
326 /* end of sub2video hack */
327 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init(). Only tcsetattr() is used here, so this may be
 * called from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335 
/* Full terminal teardown, for normal (non-signal) exit paths. */
void term_exit(void)
{
    /* emit an empty QUIET-level message — presumably to flush/terminate
       the progress line; NOTE(review): confirm intent */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* NOTE(review): the call to term_exit_sigsafe() was lost in
       extraction (internal line 339) */
}
341 
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
345 static volatile int ffmpeg_exited = 0;
348 
/* Signal handler: records the received signal so the main loop can shut
 * down gracefully; after more than 3 signals the user clearly wants out,
 * so hard-exit immediately. */
static void
/* NOTE(review): the declarator line 'sigterm_handler(int sig)' and the
   lines incrementing received_nb_signals / calling term_exit_sigsafe()
   were lost in extraction */
{
    int ret;
    received_sigterm = sig;
    if(received_nb_signals > 3) {
        /* write() is async-signal-safe, unlike fprintf() */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
363 
364 #if HAVE_SETCONSOLECTRLHANDLER
365 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
366 {
367  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
368 
369  switch (fdwCtrlType)
370  {
371  case CTRL_C_EVENT:
372  case CTRL_BREAK_EVENT:
373  sigterm_handler(SIGINT);
374  return TRUE;
375 
376  case CTRL_CLOSE_EVENT:
377  case CTRL_LOGOFF_EVENT:
378  case CTRL_SHUTDOWN_EVENT:
379  sigterm_handler(SIGTERM);
380  /* Basically, with these 3 events, when we return from this method the
381  process is hard terminated, so stall as long as we need to
382  to try and let the main thread(s) clean up and gracefully terminate
383  (we have at most 5 seconds, but should be done far before that). */
384  while (!ffmpeg_exited) {
385  Sleep(0);
386  }
387  return TRUE;
388 
389  default:
390  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
391  return FALSE;
392  }
393 }
394 #endif
395 
396 #ifdef __linux__
397 #define SIGNAL(sig, func) \
398  do { \
399  action.sa_handler = func; \
400  sigaction(sig, &action, NULL); \
401  } while (0)
402 #else
403 #define SIGNAL(sig, func) \
404  signal(sig, func)
405 #endif
406 
/* Install signal handlers and, when stdin interaction is enabled, put the
 * terminal into raw-ish mode so single keypresses can be read. The
 * original tty state is saved in 'oldtty' for restoration at exit. */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* save current state so term_exit() can restore it */
            oldtty = tty;
            restore_tty = 1;

            /* disable input translation and flow control */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                            |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            /* disable echo and canonical (line-buffered) mode */
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            /* read returns after 1 byte, no inter-byte timeout */
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
454 
455 /* read a key without blocking */
/* Read a key without blocking.
 *
 * Returns the character read, or a value <= 0 when no key is available
 * (or the input pipe was closed).
 *
 * Fix: in the PeekNamedPipe branch the result of read() was previously
 * ignored, so a failed or zero-byte read returned an indeterminate 'ch'
 * (use of an uninitialized variable). The return value is now checked,
 * mirroring the termios branch. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* poll stdin with a zero timeout so we never block */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        /* GetConsoleMode fails on pipes, succeeds on real consoles */
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            /* check the result: a failed read must not return garbage */
            if (read(0, &ch, 1) == 1)
                return ch;
            return -1;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
506 
/* AVIOInterruptCallback: a non-zero return aborts blocking I/O inside
 * lavf. NOTE(review): the return statement was lost in extraction
 * (presumably based on received_nb_signals / transcode_init_done). */
static int decode_interrupt_cb(void *ctx)
{
}
511 
513 
/* Global teardown, registered as the exit_program() callback: free all
 * filtergraphs, output/input streams and files, close the vstats file and
 * restore the terminal. 'ret' is the process exit code being used.
 * NOTE(review): several lines of this function were lost in extraction;
 * each gap is marked below — compare against the original file before
 * editing. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* NOTE(review): line missing here (presumably frees fg->graph) */
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            if (ifilter->frame_queue) {
                AVFrame *frame;
                /* drain and free every queued frame */
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                /* NOTE(review): the loop-body line (frame free) is missing,
                   so as written the next statement is the loop body */
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
                /* NOTE(review): loop body and queue free missing here */
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            /* NOTE(review): line missing here */
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        /* NOTE(review): line missing here (presumably frees filtergraphs[i]) */
    }
    /* NOTE(review): lines missing here (filtergraph array / subtitle_out) */

    /* close files */
    for (i = 0; i < nb_output_files; i++)
    /* NOTE(review): loop-body line missing here (closes output_files[i]) */

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): declaration of 'ost' missing here */

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_packet_free(&ost->pkt);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        if (ost->muxing_queue) {
            AVPacket *pkt;
            /* drain and free every queued packet */
            while (av_fifo_read(ost->muxing_queue, &pkt, 1) >= 0)
            /* NOTE(review): loop-body line (packet free) missing here */
            av_fifo_freep2(&ost->muxing_queue);
        }

        /* NOTE(review): line missing here (presumably frees output_streams[i]) */
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        /* NOTE(review): per-input-file cleanup lines missing here */
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* NOTE(review): line missing here */
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        /* NOTE(review): lines missing here */
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        /* NOTE(review): lines missing here */
    }

    if (vstats_file) {
        /* fclose() on a write stream can still fail when flushing */
        if (fclose(vstats_file))
            /* NOTE(review): the av_log(...) call opening is missing;
               only its arguments remain below */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }

    /* NOTE(review): several cleanup lines missing here */

    uninit_opts();

    /* NOTE(review): line missing here */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
657 
/* NOTE(review): the declarator line is missing from this view —
 * presumably remove_avoptions(AVDictionary **a, AVDictionary *b), which
 * iterates every entry of b and removes the matching key from *a. The
 * loop-body line (an av_dict_set() call) was also lost in extraction. */
{
    const AVDictionaryEntry *t = NULL;

    /* empty key + AV_DICT_IGNORE_SUFFIX iterates all entries of b */
    while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
    }
}
666 
/* Abort the program if the dictionary still contains any entry, i.e. an
 * option the (de)muxer/codec did not consume.
 * NOTE(review): the declarator line is missing from this view —
 * presumably assert_avoptions(AVDictionary *m). */
{
    const AVDictionaryEntry *t;
    /* any remaining entry means an unrecognized option was passed */
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
675 
/* Terminate when an experimental codec is requested without the required
 * opt-in; both parameters are unused in this body (the diagnostic is
 * presumably emitted by the caller — NOTE(review): confirm). */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
680 
/* With -benchmark_all, log the user/sys/real time spent since the last
 * call, tagged with the printf-style label in 'fmt' (NULL just resets the
 * reference timestamps). */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        /* NOTE(review): the declaration of 't' (current timestamps,
           presumably from a get_benchmark_time_stamps() call) was lost
           in extraction */
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            /* NOTE(review): the av_log(...) call opening and the user/sys
               delta arguments are missing; only fragments remain below */
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t;
    }
}
701 
/* Mark this output stream's encoder as finished; with -shortest, clamp
 * the owning file's recording time to this stream's current end so the
 * other streams stop at the same point.
 * NOTE(review): the declarator line is missing from this view —
 * presumably close_output_stream(OutputStream *ost). */
{
    OutputFile *of = output_files[ost->file_index];
    /* streamcopy uses the muxer time base, encoding the encoder's */
    AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end);
    }
}
713 
714 /*
715  * Send a single packet to the output, applying any bitstream filters
716  * associated with the output stream. This may result in any number
717  * of packets actually being written, depending on what bitstream
718  * filters are applied. The supplied packet is consumed and will be
719  * blank (as if newly-allocated) when this function returns.
720  *
721  * If eof is set, instead indicate EOF to all bitstream filters and
722  * therefore flush any delayed packets to the output. A blank packet
723  * must be supplied in this case.
724  */
/* NOTE(review): the first declarator line is missing from this view —
 * presumably static void output_packet(OutputFile *of, AVPacket *pkt,
 * ...). See the block comment above for the contract. */
                          OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters */
    if (ost->bsf_ctx) {
        /* on EOF, send NULL to drain any delayed packets from the bsf */
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0)
            goto finish;
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
            of_write_packet(of, pkt, ost, 0);
        /* EAGAIN just means the bsf wants more input — not an error */
        if (ret == AVERROR(EAGAIN))
            ret = 0;
    } else if (!eof)
        of_write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
750 
752 {
753  OutputFile *of = output_files[ost->file_index];
754 
755  if (of->recording_time != INT64_MAX &&
756  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
757  AV_TIME_BASE_Q) >= 0) {
759  return 0;
760  }
761  return 1;
762 }
763 
/* Rescale frame->pts from the filter output time base to the encoder time
 * base and return the pts as a double carrying extra fractional precision.
 * NOTE(review): the first declarator line (presumably
 * adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, ...)),
 * the 'filter_tb' declaration and the start_time subtraction lines were
 * lost in extraction. */
                                AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational tb = enc->time_base;
        /* widen the denominator so the rescale keeps fractional bits */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
        /* NOTE(review): subtrahend line missing here */
        float_pts /= 1 << extra_bits;
        // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
        /* NOTE(review): subtrahend line missing here */
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
807 
809  char *error, int error_len);
810 
/* Initialize the output stream if it has not been initialized yet; on
 * failure log the error and, when 'fatal' is set, exit the program.
 * Returns 0 on success (or if already initialized), an AVERROR otherwise.
 * NOTE(review): the first declarator line is missing from this view —
 * presumably init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, */
                                    unsigned int fatal)
{
    int ret = AVERROR_BUG;
    char error[1024] = {0};

    if (ost->initialized)
        return 0;

    ret = init_output_stream(ost, frame, error, sizeof(error));
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
               ost->file_index, ost->index, error);

        if (fatal)
            exit_program(1);
    }

    return ret;
}
831 
832 static double psnr(double d)
833 {
834  return -10.0 * log10(d);
835 }
836 
/* Read the encoder's quality/error side data from an encoded video packet
 * and, when write_vstats is set, append one line to the -vstats file.
 * NOTE(review): the opening brace and the beginning of the
 * av_packet_get_side_data() call initializing 'sd' were lost in
 * extraction; only the trailing 'NULL);' argument remains below. */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
                                                    NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* side data layout (as read here): LE32 quality, byte 4 = picture
       type, byte 5 = error count, LE64 error values from offset 8 */
    ost->quality = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        /* version 2 adds output file / stream indices */
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(ost->mux_timebase);
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
891 
/* Send one frame (or NULL to flush) to the encoder and write all packets
 * it produces. Returns 0, AVERROR_EOF when the encoder is fully drained,
 * or a negative error code.
 * NOTE(review): the declarator line is missing from this view —
 * presumably encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame).
 * Further gaps are marked below. */
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    const char *type_desc = av_get_media_type_string(enc->codec_type);
    const char *action = frame ? "encode" : "flush";
    int ret;

    if (frame) {
        ost->frames_encoded++;

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   type_desc,
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
    }

    /* NOTE(review): line missing here (presumably update_benchmark(NULL)) */

    ret = avcodec_send_frame(enc, frame);
    /* EOF from send_frame is expected while flushing (frame == NULL) */
    if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
        av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
               type_desc);
        return ret;
    }

    while (1) {
        /* NOTE(review): line missing here (presumably
           ret = avcodec_receive_packet(enc, pkt)) */
        update_benchmark("%s_%s %d.%d", action, type_desc,
                         ost->file_index, ost->index);

        /* if two pass, output log on success and EOF */
        if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
            fprintf(ost->logfile, "%s", enc->stats_out);

        if (ret == AVERROR(EAGAIN)) {
            av_assert0(frame); // should never happen during flushing
            return 0;
        } else if (ret == AVERROR_EOF) {
            /* propagate EOF through the bitstream filters / muxer */
            output_packet(of, pkt, ost, 1);
            return ret;
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
            return ret;
        }

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   /* NOTE(review): timestamp argument lines missing here */
        }

        av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   /* NOTE(review): timestamp argument lines missing here */
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            /* NOTE(review): line missing here (presumably
               update_video_stats(...)) */

        ost->packets_encoded++;

        output_packet(of, pkt, ost, 0);
    }

    av_assert0(0);
}
973 
/* Encode one audio frame and pass the packets to the muxer; exits the
 * program on encode failure.
 * NOTE(review): the first declarator line (presumably
 * do_audio_out(OutputFile *of, OutputStream *ost,) and the statements
 * before the bare 'return' below were lost in extraction. */
                         AVFrame *frame)
{
    int ret;


        return;

    /* without a usable pts (or with negative audio_sync_method), keep the
       running sample counter as the timestamp */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;

    ret = encode_frame(of, ost, frame);
    if (ret < 0)
        exit_program(1);
}
993 
/* Encode one subtitle and send the resulting packet(s) to the muxer.
 * DVB subtitles are encoded twice: one packet draws the subpicture, a
 * second (with zero rects) clears it. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared 1 MiB encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second DVB pass encodes zero rects — the "clear" packet */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        /* restore the rect count clobbered for the clear packet */
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        /* NOTE(review): one line missing here (lost in extraction) */
        pkt->data = subtitle_out;
        pkt->size = subtitle_out_size;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1076 
1077 /* May modify/reset next_picture */
1078 static void do_video_out(OutputFile *of,
1079  OutputStream *ost,
1080  AVFrame *next_picture)
1081 {
1082  int ret;
1083  AVCodecContext *enc = ost->enc_ctx;
1084  AVRational frame_rate;
1085  int64_t nb_frames, nb0_frames, i;
1086  double delta, delta0;
1087  double duration = 0;
1088  double sync_ipts = AV_NOPTS_VALUE;
1089  InputStream *ist = NULL;
1090  AVFilterContext *filter = ost->filter->filter;
1091 
1092  init_output_stream_wrapper(ost, next_picture, 1);
1093  sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1094 
1095  if (ost->source_index >= 0)
1096  ist = input_streams[ost->source_index];
1097 
1098  frame_rate = av_buffersink_get_frame_rate(filter);
1099  if (frame_rate.num > 0 && frame_rate.den > 0)
1100  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1101 
1102  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1103  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1104 
1105  if (!ost->filters_script &&
1106  !ost->filters &&
1107  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1108  next_picture &&
1109  ist &&
1110  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1111  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1112  }
1113 
1114  if (!next_picture) {
1115  //end, flushing
1116  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1117  ost->last_nb0_frames[1],
1118  ost->last_nb0_frames[2]);
1119  } else {
1120  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1121  delta = delta0 + duration;
1122 
1123  /* by default, we output a single frame */
1124  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1125  nb_frames = 1;
1126 
1127  if (delta0 < 0 &&
1128  delta > 0 &&
1129  ost->vsync_method != VSYNC_PASSTHROUGH &&
1130  ost->vsync_method != VSYNC_DROP) {
1131  if (delta0 < -0.6) {
1132  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1133  } else
1134  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1135  sync_ipts = ost->sync_opts;
1136  duration += delta0;
1137  delta0 = 0;
1138  }
1139 
1140  switch (ost->vsync_method) {
1141  case VSYNC_VSCFR:
1142  if (ost->frame_number == 0 && delta0 >= 0.5) {
1143  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1144  delta = duration;
1145  delta0 = 0;
1146  ost->sync_opts = llrint(sync_ipts);
1147  }
1148  case VSYNC_CFR:
1149  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1150  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1151  nb_frames = 0;
1152  } else if (delta < -1.1)
1153  nb_frames = 0;
1154  else if (delta > 1.1) {
1155  nb_frames = llrintf(delta);
1156  if (delta0 > 1.1)
1157  nb0_frames = llrintf(delta0 - 0.6);
1158  }
1159  break;
1160  case VSYNC_VFR:
1161  if (delta <= -0.6)
1162  nb_frames = 0;
1163  else if (delta > 0.6)
1164  ost->sync_opts = llrint(sync_ipts);
1165  break;
1166  case VSYNC_DROP:
1167  case VSYNC_PASSTHROUGH:
1168  ost->sync_opts = llrint(sync_ipts);
1169  break;
1170  default:
1171  av_assert0(0);
1172  }
1173  }
1174 
1175  /*
1176  * For video, number of frames in == number of packets out.
1177  * But there may be reordering, so we can't throw away frames on encoder
1178  * flush, we need to limit them here, before they go into encoder.
1179  */
1180  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1181  nb0_frames = FFMIN(nb0_frames, nb_frames);
1182 
1183  memmove(ost->last_nb0_frames + 1,
1184  ost->last_nb0_frames,
1185  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1186  ost->last_nb0_frames[0] = nb0_frames;
1187 
1188  if (nb0_frames == 0 && ost->last_dropped) {
1189  nb_frames_drop++;
1191  "*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
1192  ost->frame_number, ost->st->index, ost->last_frame->pts);
1193  }
1194  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1195  if (nb_frames > dts_error_threshold * 30) {
1196  av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
1197  nb_frames_drop++;
1198  return;
1199  }
1200  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1201  av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
1202  if (nb_frames_dup > dup_warning) {
1203  av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
1204  dup_warning *= 10;
1205  }
1206  }
1207  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1208  ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1209 
1210  /* duplicates frame if needed */
1211  for (i = 0; i < nb_frames; i++) {
1212  AVFrame *in_picture;
1213  int forced_keyframe = 0;
1214  double pts_time;
1215 
1216  if (i < nb0_frames && ost->last_frame->buf[0]) {
1217  in_picture = ost->last_frame;
1218  } else
1219  in_picture = next_picture;
1220 
1221  if (!in_picture)
1222  return;
1223 
1224  in_picture->pts = ost->sync_opts;
1225 
1226  if (!check_recording_time(ost))
1227  return;
1228 
1229  in_picture->quality = enc->global_quality;
1230  in_picture->pict_type = 0;
1231 
1232  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1233  in_picture->pts != AV_NOPTS_VALUE)
1234  ost->forced_kf_ref_pts = in_picture->pts;
1235 
1236  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1237  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1238  if (ost->forced_kf_index < ost->forced_kf_count &&
1239  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1240  ost->forced_kf_index++;
1241  forced_keyframe = 1;
1242  } else if (ost->forced_keyframes_pexpr) {
1243  double res;
1244  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1245  res = av_expr_eval(ost->forced_keyframes_pexpr,
1246  ost->forced_keyframes_expr_const_values, NULL);
1247  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1248  ost->forced_keyframes_expr_const_values[FKF_N],
1249  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1250  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1251  ost->forced_keyframes_expr_const_values[FKF_T],
1252  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1253  res);
1254  if (res) {
1255  forced_keyframe = 1;
1256  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1257  ost->forced_keyframes_expr_const_values[FKF_N];
1258  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1259  ost->forced_keyframes_expr_const_values[FKF_T];
1260  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1261  }
1262 
1263  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1264  } else if ( ost->forced_keyframes
1265  && !strncmp(ost->forced_keyframes, "source", 6)
1266  && in_picture->key_frame==1
1267  && !i) {
1268  forced_keyframe = 1;
1269  } else if ( ost->forced_keyframes
1270  && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1271  && !i) {
1272  forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1273  ost->dropped_keyframe = 0;
1274  }
1275 
1276  if (forced_keyframe) {
1277  in_picture->pict_type = AV_PICTURE_TYPE_I;
1278  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1279  }
1280 
1281  ret = encode_frame(of, ost, in_picture);
1282  if (ret < 0)
1283  exit_program(1);
1284 
1285  ost->sync_opts++;
1286  ost->frame_number++;
1287  }
1288 
1289  av_frame_unref(ost->last_frame);
1290  if (next_picture)
1291  av_frame_move_ref(ost->last_frame, next_picture);
1292 }
1293 
1295 {
1296  OutputFile *of = output_files[ost->file_index];
1297  AVRational time_base = ost->stream_copy ? ost->mux_timebase : ost->enc_ctx->time_base;
1298 
1299  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1300 
1301  if (of->shortest) {
1302  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, time_base, AV_TIME_BASE_Q);
1303  of->recording_time = FFMIN(of->recording_time, end);
1304  }
1305 }
1306 
1307 /**
1308  * Get and encode new output from any of the filtergraphs, without causing
1309  * activity.
1310  *
1311  * @return 0 for success, <0 for severe errors
1312  */
1313 static int reap_filters(int flush)
1314 {
1315  AVFrame *filtered_frame = NULL;
1316  int i;
1317 
1318  /* Reap all buffers present in the buffer sinks */
1319  for (i = 0; i < nb_output_streams; i++) {
1321  OutputFile *of = output_files[ost->file_index];
1323  AVCodecContext *enc = ost->enc_ctx;
1324  int ret = 0;
1325 
1326  if (!ost->filter || !ost->filter->graph->graph)
1327  continue;
1328  filter = ost->filter->filter;
1329 
1330  /*
1331  * Unlike video, with audio the audio frame size matters.
1332  * Currently we are fully reliant on the lavfi filter chain to
1333  * do the buffering deed for us, and thus the frame size parameter
1334  * needs to be set accordingly. Where does one get the required
1335  * frame size? From the initialized AVCodecContext of an audio
1336  * encoder. Thus, if we have gotten to an audio stream, initialize
1337  * the encoder earlier than receiving the first AVFrame.
1338  */
1341 
1342  filtered_frame = ost->filtered_frame;
1343 
1344  while (1) {
1345  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1347  if (ret < 0) {
1348  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1350  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1351  } else if (flush && ret == AVERROR_EOF) {
1353  do_video_out(of, ost, NULL);
1354  }
1355  break;
1356  }
1357  if (ost->finished) {
1358  av_frame_unref(filtered_frame);
1359  continue;
1360  }
1361 
1362  switch (av_buffersink_get_type(filter)) {
1363  case AVMEDIA_TYPE_VIDEO:
1364  if (!ost->frame_aspect_ratio.num)
1365  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1366 
1367  do_video_out(of, ost, filtered_frame);
1368  break;
1369  case AVMEDIA_TYPE_AUDIO:
1370  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1371  enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
1373  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1374  break;
1375  }
1376  do_audio_out(of, ost, filtered_frame);
1377  break;
1378  default:
1379  // TODO support subtitle filters
1380  av_assert0(0);
1381  }
1382 
1383  av_frame_unref(filtered_frame);
1384  }
1385  }
1386 
1387  return 0;
1388 }
1389 
1390 static void print_final_stats(int64_t total_size)
1391 {
1392  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1393  uint64_t subtitle_size = 0;
1394  uint64_t data_size = 0;
1395  float percent = -1.0;
1396  int i, j;
1397  int pass1_used = 1;
1398 
1399  for (i = 0; i < nb_output_streams; i++) {
1401  switch (ost->enc_ctx->codec_type) {
1402  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1403  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1404  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1405  default: other_size += ost->data_size; break;
1406  }
1407  extra_size += ost->enc_ctx->extradata_size;
1408  data_size += ost->data_size;
1409  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1411  pass1_used = 0;
1412  }
1413 
1414  if (data_size && total_size>0 && total_size >= data_size)
1415  percent = 100.0 * (total_size - data_size) / data_size;
1416 
1417  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1418  video_size / 1024.0,
1419  audio_size / 1024.0,
1420  subtitle_size / 1024.0,
1421  other_size / 1024.0,
1422  extra_size / 1024.0);
1423  if (percent >= 0.0)
1424  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1425  else
1426  av_log(NULL, AV_LOG_INFO, "unknown");
1427  av_log(NULL, AV_LOG_INFO, "\n");
1428 
1429  /* print verbose per-stream stats */
1430  for (i = 0; i < nb_input_files; i++) {
1431  InputFile *f = input_files[i];
1432  uint64_t total_packets = 0, total_size = 0;
1433 
1434  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1435  i, f->ctx->url);
1436 
1437  for (j = 0; j < f->nb_streams; j++) {
1438  InputStream *ist = input_streams[f->ist_index + j];
1439  enum AVMediaType type = ist->dec_ctx->codec_type;
1440 
1441  total_size += ist->data_size;
1442  total_packets += ist->nb_packets;
1443 
1444  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1446  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1447  ist->nb_packets, ist->data_size);
1448 
1449  if (ist->decoding_needed) {
1450  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1451  ist->frames_decoded);
1452  if (type == AVMEDIA_TYPE_AUDIO)
1453  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1454  av_log(NULL, AV_LOG_VERBOSE, "; ");
1455  }
1456 
1457  av_log(NULL, AV_LOG_VERBOSE, "\n");
1458  }
1459 
1460  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1461  total_packets, total_size);
1462  }
1463 
1464  for (i = 0; i < nb_output_files; i++) {
1465  OutputFile *of = output_files[i];
1466  uint64_t total_packets = 0, total_size = 0;
1467 
1468  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1469  i, of->ctx->url);
1470 
1471  for (j = 0; j < of->ctx->nb_streams; j++) {
1473  enum AVMediaType type = ost->enc_ctx->codec_type;
1474 
1475  total_size += ost->data_size;
1476  total_packets += ost->packets_written;
1477 
1478  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1480  if (ost->encoding_needed) {
1481  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1482  ost->frames_encoded);
1483  if (type == AVMEDIA_TYPE_AUDIO)
1484  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1485  av_log(NULL, AV_LOG_VERBOSE, "; ");
1486  }
1487 
1488  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1489  ost->packets_written, ost->data_size);
1490 
1491  av_log(NULL, AV_LOG_VERBOSE, "\n");
1492  }
1493 
1494  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1495  total_packets, total_size);
1496  }
1497  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1498  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1499  if (pass1_used) {
1500  av_log(NULL, AV_LOG_WARNING, "\n");
1501  } else {
1502  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1503  }
1504  }
1505 }
1506 
1507 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1508 {
1509  AVBPrint buf, buf_script;
1510  OutputStream *ost;
1511  AVFormatContext *oc;
1512  int64_t total_size;
1513  AVCodecContext *enc;
1514  int vid, i;
1515  double bitrate;
1516  double speed;
1517  int64_t pts = INT64_MIN + 1;
1518  static int64_t last_time = -1;
1519  static int first_report = 1;
1520  static int qp_histogram[52];
1521  int hours, mins, secs, us;
1522  const char *hours_sign;
1523  int ret;
1524  float t;
1525 
1526  if (!print_stats && !is_last_report && !progress_avio)
1527  return;
1528 
1529  if (!is_last_report) {
1530  if (last_time == -1) {
1531  last_time = cur_time;
1532  }
1533  if (((cur_time - last_time) < stats_period && !first_report) ||
1534  (first_report && nb_output_dumped < nb_output_files))
1535  return;
1536  last_time = cur_time;
1537  }
1538 
1539  t = (cur_time-timer_start) / 1000000.0;
1540 
1541 
1542  oc = output_files[0]->ctx;
1543 
1544  total_size = avio_size(oc->pb);
1545  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1546  total_size = avio_tell(oc->pb);
1547 
1548  vid = 0;
1550  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1551  for (i = 0; i < nb_output_streams; i++) {
1552  float q = -1;
1553  ost = output_streams[i];
1554  enc = ost->enc_ctx;
1555  if (!ost->stream_copy)
1556  q = ost->quality / (float) FF_QP2LAMBDA;
1557 
1558  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1559  av_bprintf(&buf, "q=%2.1f ", q);
1560  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1561  ost->file_index, ost->index, q);
1562  }
1563  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1564  float fps;
1565  int64_t frame_number = ost->frame_number;
1566 
1567  fps = t > 1 ? frame_number / t : 0;
1568  av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
1569  frame_number, fps < 9.95, fps, q);
1570  av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
1571  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1572  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1573  ost->file_index, ost->index, q);
1574  if (is_last_report)
1575  av_bprintf(&buf, "L");
1576  if (qp_hist) {
1577  int j;
1578  int qp = lrintf(q);
1579  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1580  qp_histogram[qp]++;
1581  for (j = 0; j < 32; j++)
1582  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1583  }
1584 
1585  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1586  int j;
1587  double error, error_sum = 0;
1588  double scale, scale_sum = 0;
1589  double p;
1590  char type[3] = { 'Y','U','V' };
1591  av_bprintf(&buf, "PSNR=");
1592  for (j = 0; j < 3; j++) {
1593  if (is_last_report) {
1594  error = enc->error[j];
1595  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1596  } else {
1597  error = ost->error[j];
1598  scale = enc->width * enc->height * 255.0 * 255.0;
1599  }
1600  if (j)
1601  scale /= 4;
1602  error_sum += error;
1603  scale_sum += scale;
1604  p = psnr(error / scale);
1605  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1606  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1607  ost->file_index, ost->index, type[j] | 32, p);
1608  }
1609  p = psnr(error_sum / scale_sum);
1610  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1611  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1612  ost->file_index, ost->index, p);
1613  }
1614  vid = 1;
1615  }
1616  /* compute min output value */
1619  ost->st->time_base, AV_TIME_BASE_Q));
1620  if (copy_ts) {
1621  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1625  }
1626  }
1627 
1628  if (is_last_report)
1629  nb_frames_drop += ost->last_dropped;
1630  }
1631 
1632  secs = FFABS(pts) / AV_TIME_BASE;
1633  us = FFABS(pts) % AV_TIME_BASE;
1634  mins = secs / 60;
1635  secs %= 60;
1636  hours = mins / 60;
1637  mins %= 60;
1638  hours_sign = (pts < 0) ? "-" : "";
1639 
1640  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1641  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1642 
1643  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1644  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1645  if (pts == AV_NOPTS_VALUE) {
1646  av_bprintf(&buf, "N/A ");
1647  } else {
1648  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1649  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1650  }
1651 
1652  if (bitrate < 0) {
1653  av_bprintf(&buf, "bitrate=N/A");
1654  av_bprintf(&buf_script, "bitrate=N/A\n");
1655  }else{
1656  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1657  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1658  }
1659 
1660  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1661  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1662  if (pts == AV_NOPTS_VALUE) {
1663  av_bprintf(&buf_script, "out_time_us=N/A\n");
1664  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1665  av_bprintf(&buf_script, "out_time=N/A\n");
1666  } else {
1667  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1668  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1669  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1670  hours_sign, hours, mins, secs, us);
1671  }
1672 
1674  av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
1675  av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
1676  av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);
1677 
1678  if (speed < 0) {
1679  av_bprintf(&buf, " speed=N/A");
1680  av_bprintf(&buf_script, "speed=N/A\n");
1681  } else {
1682  av_bprintf(&buf, " speed=%4.3gx", speed);
1683  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1684  }
1685 
1686  if (print_stats || is_last_report) {
1687  const char end = is_last_report ? '\n' : '\r';
1688  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1689  fprintf(stderr, "%s %c", buf.str, end);
1690  } else
1691  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1692 
1693  fflush(stderr);
1694  }
1695  av_bprint_finalize(&buf, NULL);
1696 
1697  if (progress_avio) {
1698  av_bprintf(&buf_script, "progress=%s\n",
1699  is_last_report ? "end" : "continue");
1700  avio_write(progress_avio, buf_script.str,
1701  FFMIN(buf_script.len, buf_script.size - 1));
1703  av_bprint_finalize(&buf_script, NULL);
1704  if (is_last_report) {
1705  if ((ret = avio_closep(&progress_avio)) < 0)
1707  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1708  }
1709  }
1710 
1711  first_report = 0;
1712 
1713  if (is_last_report)
1714  print_final_stats(total_size);
1715 }
1716 
1718 {
1719  int ret;
1720 
1721  // We never got any input. Set a fake format, which will
1722  // come from libavformat.
1723  ifilter->format = par->format;
1724  ifilter->sample_rate = par->sample_rate;
1725  ifilter->width = par->width;
1726  ifilter->height = par->height;
1727  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1728  ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
1729  if (ret < 0)
1730  return ret;
1731 
1732  return 0;
1733 }
1734 
1735 static void flush_encoders(void)
1736 {
1737  int i, ret;
1738 
1739  for (i = 0; i < nb_output_streams; i++) {
1741  AVCodecContext *enc = ost->enc_ctx;
1742  OutputFile *of = output_files[ost->file_index];
1743 
1744  if (!ost->encoding_needed)
1745  continue;
1746 
1747  // Try to enable encoding with no input frames.
1748  // Maybe we should just let encoding fail instead.
1749  if (!ost->initialized) {
1750  FilterGraph *fg = ost->filter->graph;
1751 
1753  "Finishing stream %d:%d without any data written to it.\n",
1754  ost->file_index, ost->st->index);
1755 
1756  if (ost->filter && !fg->graph) {
1757  int x;
1758  for (x = 0; x < fg->nb_inputs; x++) {
1759  InputFilter *ifilter = fg->inputs[x];
1760  if (ifilter->format < 0 &&
1761  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) {
1762  av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
1763  exit_program(1);
1764  }
1765  }
1766 
1768  continue;
1769 
1770  ret = configure_filtergraph(fg);
1771  if (ret < 0) {
1772  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1773  exit_program(1);
1774  }
1775 
1777  }
1778 
1780  }
1781 
1783  continue;
1784 
1785  ret = encode_frame(of, ost, NULL);
1786  if (ret != AVERROR_EOF)
1787  exit_program(1);
1788  }
1789 }
1790 
1791 /*
1792  * Check whether a packet from ist should be written into ost at this time
1793  */
1795 {
1796  OutputFile *of = output_files[ost->file_index];
1797  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1798 
1799  if (ost->source_index != ist_index)
1800  return 0;
1801 
1802  if (ost->finished & MUXER_FINISHED)
1803  return 0;
1804 
1805  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1806  return 0;
1807 
1808  return 1;
1809 }
1810 
1812 {
1813  OutputFile *of = output_files[ost->file_index];
1814  InputFile *f = input_files [ist->file_index];
1815  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1816  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1817  AVPacket *opkt = ost->pkt;
1818 
1819  av_packet_unref(opkt);
1820  // EOF: flush output bitstream filters.
1821  if (!pkt) {
1822  output_packet(of, opkt, ost, 1);
1823  return;
1824  }
1825 
1826  if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
1827  !ost->copy_initial_nonkeyframes)
1828  return;
1829 
1830  if (!ost->streamcopy_started && !ost->copy_prior_start) {
1831  int64_t comp_start = start_time;
1832  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1833  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1834  if (pkt->pts == AV_NOPTS_VALUE ?
1835  ist->pts < comp_start :
1836  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1837  return;
1838  }
1839 
1840  if (of->recording_time != INT64_MAX &&
1841  ist->pts >= of->recording_time + start_time) {
1843  return;
1844  }
1845 
1846  if (f->recording_time != INT64_MAX) {
1847  start_time = 0;
1848  if (copy_ts) {
1849  start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
1850  start_time += start_at_zero ? 0 : f->ctx->start_time;
1851  }
1852  if (ist->pts >= f->recording_time + start_time) {
1854  return;
1855  }
1856  }
1857 
1858  if (av_packet_ref(opkt, pkt) < 0)
1859  exit_program(1);
1860 
1861  if (pkt->pts != AV_NOPTS_VALUE)
1862  opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
1863 
1864  if (pkt->dts == AV_NOPTS_VALUE) {
1865  opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
1866  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
1868  if(!duration)
1869  duration = ist->dec_ctx->frame_size;
1870  opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
1871  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
1872  &ist->filter_in_rescale_delta_last, ost->mux_timebase);
1873  /* dts will be set immediately afterwards to what pts is now */
1874  opkt->pts = opkt->dts - ost_tb_start_time;
1875  } else
1876  opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
1877  opkt->dts -= ost_tb_start_time;
1878 
1879  opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
1880 
1881  ost->sync_opts += opkt->duration;
1882 
1883  output_packet(of, opkt, ost, 0);
1884 
1885  ost->streamcopy_started = 1;
1886 }
1887 
1889 {
1890  AVCodecContext *dec = ist->dec_ctx;
1891 
1892  if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
1893  char layout_name[256];
1894 
1895  if (dec->ch_layout.nb_channels > ist->guess_layout_max)
1896  return 0;
1897  av_channel_layout_default(&dec->ch_layout, dec->ch_layout.nb_channels);
1898  if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
1899  return 0;
1900  av_channel_layout_describe(&dec->ch_layout, layout_name, sizeof(layout_name));
1901  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1902  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1903  }
1904  return 1;
1905 }
1906 
1907 static void check_decode_result(InputStream *ist, int *got_output, int ret)
1908 {
1909  if (*got_output || ret<0)
1910  decode_error_stat[ret<0] ++;
1911 
1912  if (ret < 0 && exit_on_error)
1913  exit_program(1);
1914 
1915  if (*got_output && ist) {
1918  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
1919  if (exit_on_error)
1920  exit_program(1);
1921  }
1922  }
1923 }
1924 
1925 // Filters can be configured only if the formats of all inputs are known.
1927 {
1928  int i;
1929  for (i = 0; i < fg->nb_inputs; i++) {
1930  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
1931  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
1932  return 0;
1933  }
1934  return 1;
1935 }
1936 
1937 static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
1938 {
1939  FilterGraph *fg = ifilter->graph;
1940  AVFrameSideData *sd;
1941  int need_reinit, ret;
1942  int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;
1943 
1944  if (keep_reference)
1945  buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;
1946 
1947  /* determine if the parameters for this input changed */
1948  need_reinit = ifilter->format != frame->format;
1949 
1950  switch (ifilter->ist->st->codecpar->codec_type) {
1951  case AVMEDIA_TYPE_AUDIO:
1952  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
1953  av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
1954  break;
1955  case AVMEDIA_TYPE_VIDEO:
1956  need_reinit |= ifilter->width != frame->width ||
1957  ifilter->height != frame->height;
1958  break;
1959  }
1960 
1961  if (!ifilter->ist->reinit_filters && fg->graph)
1962  need_reinit = 0;
1963 
1964  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
1965  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
1966  need_reinit = 1;
1967 
1969  if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
1970  need_reinit = 1;
1971  } else if (ifilter->displaymatrix)
1972  need_reinit = 1;
1973 
1974  if (need_reinit) {
1976  if (ret < 0)
1977  return ret;
1978  }
1979 
1980  /* (re)init the graph if possible, otherwise buffer the frame and return */
1981  if (need_reinit || !fg->graph) {
1982  if (!ifilter_has_all_input_formats(fg)) {
1984  if (!tmp)
1985  return AVERROR(ENOMEM);
1986 
1987  ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
1988  if (ret < 0)
1989  av_frame_free(&tmp);
1990 
1991  return ret;
1992  }
1993 
1994  ret = reap_filters(1);
1995  if (ret < 0 && ret != AVERROR_EOF) {
1996  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
1997  return ret;
1998  }
1999 
2000  ret = configure_filtergraph(fg);
2001  if (ret < 0) {
2002  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2003  return ret;
2004  }
2005  }
2006 
2007  ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
2008  if (ret < 0) {
2009  if (ret != AVERROR_EOF)
2010  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2011  return ret;
2012  }
2013 
2014  return 0;
2015 }
2016 
2017 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2018 {
2019  int ret;
2020 
2021  ifilter->eof = 1;
2022 
2023  if (ifilter->filter) {
2025  if (ret < 0)
2026  return ret;
2027  } else {
2028  // the filtergraph was never configured
2029  if (ifilter->format < 0) {
2030  ret = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2031  if (ret < 0)
2032  return ret;
2033  }
2034  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2035  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2036  return AVERROR_INVALIDDATA;
2037  }
2038  }
2039 
2040  return 0;
2041 }
2042 
2043 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2044 // There is the following difference: if you got a frame, you must call
2045 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2046 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2047 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2048 {
2049  int ret;
2050 
2051  *got_frame = 0;
2052 
2053  if (pkt) {
2054  ret = avcodec_send_packet(avctx, pkt);
2055  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2056  // decoded frames with avcodec_receive_frame() until done.
2057  if (ret < 0 && ret != AVERROR_EOF)
2058  return ret;
2059  }
2060 
2061  ret = avcodec_receive_frame(avctx, frame);
2062  if (ret < 0 && ret != AVERROR(EAGAIN))
2063  return ret;
2064  if (ret >= 0)
2065  *got_frame = 1;
2066 
2067  return 0;
2068 }
2069 
2071 {
2072  int i, ret;
2073 
2074  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2075  for (i = 0; i < ist->nb_filters; i++) {
2076  ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1);
2077  if (ret == AVERROR_EOF)
2078  ret = 0; /* ignore */
2079  if (ret < 0) {
2081  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2082  break;
2083  }
2084  }
2085  return ret;
2086 }
2087 
2089  int *decode_failed)
2090 {
2092  AVCodecContext *avctx = ist->dec_ctx;
2093  int ret, err = 0;
2094  AVRational decoded_frame_tb;
2095 
2097  ret = decode(avctx, decoded_frame, got_output, pkt);
2098  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2099  if (ret < 0)
2100  *decode_failed = 1;
2101 
2102  if (ret >= 0 && avctx->sample_rate <= 0) {
2103  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2105  }
2106 
2107  if (ret != AVERROR_EOF)
2109 
2110  if (!*got_output || ret < 0)
2111  return ret;
2112 
2114  ist->frames_decoded++;
2115 
2116  /* increment next_dts to use for the case where the input stream does not
2117  have timestamps or there are multiple frames in the packet */
2118  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2119  avctx->sample_rate;
2120  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2121  avctx->sample_rate;
2122 
2123  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2124  decoded_frame_tb = ist->st->time_base;
2125  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2126  decoded_frame->pts = pkt->pts;
2127  decoded_frame_tb = ist->st->time_base;
2128  }else {
2129  decoded_frame->pts = ist->dts;
2130  decoded_frame_tb = AV_TIME_BASE_Q;
2131  }
2132  if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
2133  pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
2135  if (pkt)
2136  ist->prev_pkt_pts = pkt->pts;
2138  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2139  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2140  (AVRational){1, avctx->sample_rate});
2143 
2145  return err < 0 ? err : ret;
2146 }
2147 
2148 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2149  int *decode_failed)
2150 {
/* Decode one video packet (pkt == NULL while draining) and forward any
 * produced frame to the filter graph.  On return, *got_output reports
 * whether a frame was produced, *duration_pts receives the frame duration
 * in the stream time base, and *decode_failed distinguishes decoder errors
 * from post-decode processing errors.
 * NOTE(review): this doxygen extract has dropped several original source
 * lines (e.g. the actual decode call before update_benchmark(), some
 * av_log level arguments, and the send-to-filters call); comments below
 * only describe the visible logic. */
2152  int i, ret = 0, err = 0;
2153  int64_t best_effort_timestamp;
2154  int64_t dts = AV_NOPTS_VALUE;
2155 
2156  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2157  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2158  // skip the packet.
2159  if (!eof && pkt && pkt->size == 0)
2160  return 0;
2161 
/* Convert the stream's running DTS (kept in AV_TIME_BASE_Q) into the
 * stream time base and stamp it on the packet before decoding. */
2162  if (ist->dts != AV_NOPTS_VALUE)
2163  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2164  if (pkt) {
2165  pkt->dts = dts; // ffmpeg.c probably shouldn't do this
2166  }
2167 
2168  // The old code used to set dts on the drain packet, which does not work
2169  // with the new API anymore.
2170  if (eof) {
/* Queue this dts so frames flushed out of the decoder without a
 * best-effort timestamp can still be stamped (FIFO pop further below). */
2171  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2172  if (!new)
2173  return AVERROR(ENOMEM);
2174  ist->dts_buffer = new;
2175  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2176  }
2177 
2180  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2181  if (ret < 0)
2182  *decode_failed = 1;
2183 
2184  // The following line may be required in some cases where there is no parser
2185  // or the parser does not has_b_frames correctly
2186  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2187  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2188  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2189  } else
2191  "video_delay is larger in decoder than demuxer %d > %d.\n"
2192  "If you want to help, upload a sample "
2193  "of this file to https://streams.videolan.org/upload/ "
2194  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2195  ist->dec_ctx->has_b_frames,
2196  ist->st->codecpar->video_delay);
2197  }
2198 
2199  if (ret != AVERROR_EOF)
2201 
/* Successful decode: warn (debug level) when the decoder context and the
 * produced frame disagree on geometry or pixel format. */
2202  if (*got_output && ret >= 0) {
2203  if (ist->dec_ctx->width != decoded_frame->width ||
2204  ist->dec_ctx->height != decoded_frame->height ||
2205  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2206  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2210  ist->dec_ctx->width,
2211  ist->dec_ctx->height,
2212  ist->dec_ctx->pix_fmt);
2213  }
2214  }
2215 
2216  if (!*got_output || ret < 0)
2217  return ret;
2218 
2219  if(ist->top_field_first>=0)
2221 
2222  ist->frames_decoded++;
2223 
/* Hardware-decoded frame: download it to system memory before filtering. */
2225  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2226  if (err < 0)
2227  goto fail;
2228  }
2230 
2231  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2232  *duration_pts = decoded_frame->pkt_duration;
2233 
/* Forced input framerate (-r before -i): synthesize CFR timestamps
 * instead of trusting the decoder's best-effort value. */
2234  if (ist->framerate.num)
2235  best_effort_timestamp = ist->cfr_next_pts++;
2236 
/* While draining with no timestamp available, pop the oldest queued
 * packet dts saved above. */
2237  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2238  best_effort_timestamp = ist->dts_buffer[0];
2239 
2240  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2241  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2242  ist->nb_dts_buffer--;
2243  }
2244 
2245  if(best_effort_timestamp != AV_NOPTS_VALUE) {
/* Note the embedded assignment: decoded_frame->pts is also set here. */
2246  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2247 
2248  if (ts != AV_NOPTS_VALUE)
2249  ist->next_pts = ist->pts = ts;
2250  }
2251 
2252  if (debug_ts) {
2253  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2254  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2255  ist->st->index, av_ts2str(decoded_frame->pts),
2257  best_effort_timestamp,
2258  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2260  ist->st->time_base.num, ist->st->time_base.den);
2261  }
2262 
2263  if (ist->st->sample_aspect_ratio.num)
2265 
2267 
2268 fail:
/* err reflects post-decode failures (hwaccel download / filter send);
 * it takes precedence over the decode return value. */
2270  return err < 0 ? err : ret;
2271 }
2272 
2274  int *decode_failed)
2275 {
/* Decode one subtitle packet, optionally fix up the previous subtitle's
 * display duration, feed sub2video, and emit the subtitle to every
 * matching subtitle encoder.
 * NOTE(review): the first signature line is missing from this extract —
 * in the full source this is transcode_subtitles(InputStream *ist,
 * AVPacket *pkt, int *got_output, int *decode_failed); confirm against
 * the original file. */
2277  int free_sub = 1;
2278  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2279  &subtitle, got_output, pkt);
2280 
2282 
2283  if (ret < 0 || !*got_output) {
2284  *decode_failed = 1;
/* A zero-sized packet signals EOF: flush pending sub2video state. */
2285  if (!pkt->size)
2286  sub2video_flush(ist);
2287  return ret;
2288  }
2289 
2290  if (ist->fix_sub_duration) {
/* -fix_sub_duration: clamp the previous subtitle's end_display_time
 * (milliseconds) so it ends when the current one starts, then swap the
 * previous subtitle in as the one to output this round. */
2291  int end = 1;
2292  if (ist->prev_sub.got_output) {
2293  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2294  1000, AV_TIME_BASE);
2295  if (end < ist->prev_sub.subtitle.end_display_time) {
2296  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2297  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2299  end <= 0 ? ", dropping it" : "");
2300  ist->prev_sub.subtitle.end_display_time = end;
2301  }
2302  }
2303  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2304  FFSWAP(int, ret, ist->prev_sub.ret);
/* Non-positive duration: the (previous) subtitle is dropped entirely. */
2306  if (end <= 0)
2307  goto out;
2308  }
2309 
2310  if (!*got_output)
2311  return ret;
2312 
2313  if (ist->sub2video.frame) {
2314  sub2video_update(ist, INT64_MIN, &subtitle);
2315  } else if (ist->nb_filters) {
/* sub2video has no frame yet: queue the subtitle until the filter graph
 * is ready (queue is created lazily; allocation failure is fatal). */
2316  if (!ist->sub2video.sub_queue)
2318  if (!ist->sub2video.sub_queue)
2319  exit_program(1);
2320 
2322  if (ret < 0)
2323  exit_program(1);
/* Ownership of the subtitle moved into the queue — don't free below. */
2324  free_sub = 0;
2325  }
2326 
2327  if (!subtitle.num_rects)
2328  goto out;
2329 
2330  ist->frames_decoded++;
2331 
/* Send the decoded subtitle to every output stream that encodes
 * subtitles from this input. */
2332  for (i = 0; i < nb_output_streams; i++) {
2334 
2335  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2336  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2337  continue;
2338 
2339  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2340  }
2341 
2342 out:
2343  if (free_sub)
2345  return ret;
2346 }
2347 
2349 {
/* Signal EOF to every filter fed by this input stream, stamping the EOF
 * with the stream's last pts rescaled to the stream time base.  Returns
 * 0 on success or the first filter error.
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is send_filter_eof(InputStream *ist). */
2350  int i, ret;
2351  /* TODO keep pts also in stream time base to avoid converting back */
2352  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2354 
2355  for (i = 0; i < ist->nb_filters; i++) {
2356  ret = ifilter_send_eof(ist->filters[i], pts);
2357  if (ret < 0)
2358  return ret;
2359  }
2360  return 0;
2361 }
2362 
2363 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2364 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2365 {
/* Core per-packet dispatcher: tracks the input stream's pts/dts clocks,
 * runs the appropriate decoder (audio/video/subtitle) when decoding is
 * needed, advances the predicted next_pts/next_dts, sends filter EOF on
 * flush, and performs streamcopy timestamp bookkeeping otherwise.
 * Returns 0 once EOF is reached, non-zero while more input is expected.
 * NOTE(review): a few original lines are dropped in this extract (e.g.
 * the ticks computation feeding duration_dts and the OutputStream
 * declaration in the final loop); comments describe visible logic only. */
2366  int ret = 0, i;
2367  int repeating = 0;
2368  int eof_reached = 0;
2369 
2370  AVPacket *avpkt = ist->pkt;
2371 
/* First packet for this stream: initialize the dts/pts clocks, offsetting
 * by the B-frame delay when an average frame rate is known. */
2372  if (!ist->saw_first_ts) {
2373  ist->first_dts =
2374  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2375  ist->pts = 0;
2376  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2377  ist->first_dts =
2378  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2379  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2380  }
2381  ist->saw_first_ts = 1;
2382  }
2383 
2384  if (ist->next_dts == AV_NOPTS_VALUE)
2385  ist->next_dts = ist->dts;
2386  if (ist->next_pts == AV_NOPTS_VALUE)
2387  ist->next_pts = ist->pts;
2388 
/* Work on a reference to the caller's packet; avpkt is the stream's
 * scratch packet, unreferenced before reuse. */
2389  if (pkt) {
2390  av_packet_unref(avpkt);
2391  ret = av_packet_ref(avpkt, pkt);
2392  if (ret < 0)
2393  return ret;
2394  }
2395 
2396  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2397  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2398  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2399  ist->next_pts = ist->pts = ist->dts;
2400  }
2401 
2402  // while we have more to decode or while the decoder did output something on EOF
2403  while (ist->decoding_needed) {
2404  int64_t duration_dts = 0;
2405  int64_t duration_pts = 0;
2406  int got_output = 0;
2407  int decode_failed = 0;
2408 
2409  ist->pts = ist->next_pts;
2410  ist->dts = ist->next_dts;
2411 
/* The decoders are passed NULL on repeat iterations so they drain
 * instead of re-consuming the packet. */
2412  switch (ist->dec_ctx->codec_type) {
2413  case AVMEDIA_TYPE_AUDIO:
2414  ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
2415  &decode_failed);
2416  av_packet_unref(avpkt);
2417  break;
2418  case AVMEDIA_TYPE_VIDEO:
2419  ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
2420  &decode_failed);
/* Predict next_dts from the packet duration, or from the codec
 * framerate (the ticks-based computation is partly dropped from
 * this extract). */
2421  if (!repeating || !pkt || got_output) {
2422  if (pkt && pkt->duration) {
2423  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2424  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2426  duration_dts = ((int64_t)AV_TIME_BASE *
2427  ist->dec_ctx->framerate.den * ticks) /
2429  }
2430 
2431  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2432  ist->next_dts += duration_dts;
2433  }else
2434  ist->next_dts = AV_NOPTS_VALUE;
2435  }
2436 
/* Prefer the actual frame duration for the pts prediction. */
2437  if (got_output) {
2438  if (duration_pts > 0) {
2439  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2440  } else {
2441  ist->next_pts += duration_dts;
2442  }
2443  }
2444  av_packet_unref(avpkt);
2445  break;
2446  case AVMEDIA_TYPE_SUBTITLE:
2447  if (repeating)
2448  break;
2449  ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
2450  if (!pkt && ret >= 0)
2451  ret = AVERROR_EOF;
2452  av_packet_unref(avpkt);
2453  break;
2454  default:
2455  return -1;
2456  }
2457 
2458  if (ret == AVERROR_EOF) {
2459  eof_reached = 1;
2460  break;
2461  }
2462 
/* Decoder errors are fatal only with -xerror; post-processing errors
 * are always fatal. */
2463  if (ret < 0) {
2464  if (decode_failed) {
2465  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2466  ist->file_index, ist->st->index, av_err2str(ret));
2467  } else {
2468  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2469  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2470  }
2471  if (!decode_failed || exit_on_error)
2472  exit_program(1);
2473  break;
2474  }
2475 
2476  if (got_output)
2477  ist->got_output = 1;
2478 
2479  if (!got_output)
2480  break;
2481 
2482  // During draining, we might get multiple output frames in this loop.
2483  // ffmpeg.c does not drain the filter chain on configuration changes,
2484  // which means if we send multiple frames at once to the filters, and
2485  // one of those frames changes configuration, the buffered frames will
2486  // be lost. This can upset certain FATE tests.
2487  // Decode only 1 frame per call on EOF to appease these FATE tests.
2488  // The ideal solution would be to rewrite decoding to use the new
2489  // decoding API in a better way.
2490  if (!pkt)
2491  break;
2492 
2493  repeating = 1;
2494  }
2495 
2496  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2497  /* except when looping we need to flush but not to send an EOF */
2498  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2499  int ret = send_filter_eof(ist);
2500  if (ret < 0) {
2501  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2502  exit_program(1);
2503  }
2504  }
2505 
2506  /* handle stream copy */
2507  if (!ist->decoding_needed && pkt) {
/* Streamcopy path: advance the dts clock from frame size / framerate
 * since no decoder updates it for us. */
2508  ist->dts = ist->next_dts;
2509  switch (ist->dec_ctx->codec_type) {
2510  case AVMEDIA_TYPE_AUDIO:
2511  av_assert1(pkt->duration >= 0);
2512  if (ist->dec_ctx->sample_rate) {
2513  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2514  ist->dec_ctx->sample_rate;
2515  } else {
2517  }
2518  break;
2519  case AVMEDIA_TYPE_VIDEO:
2520  if (ist->framerate.num) {
2521  // TODO: Remove work-around for c99-to-c89 issue 7
2522  AVRational time_base_q = AV_TIME_BASE_Q;
2523  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2524  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2525  } else if (pkt->duration) {
2527  } else if(ist->dec_ctx->framerate.num != 0) {
2528  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2529  ist->next_dts += ((int64_t)AV_TIME_BASE *
2530  ist->dec_ctx->framerate.den * ticks) /
2532  }
2533  break;
2534  }
2535  ist->pts = ist->dts;
2536  ist->next_pts = ist->next_dts;
2537  } else if (!ist->decoding_needed)
2538  eof_reached = 1;
2539 
/* Copy the packet to every streamcopy output fed by this input. */
2540  for (i = 0; i < nb_output_streams; i++) {
2542 
2543  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2544  continue;
2545 
2546  do_streamcopy(ist, ost, pkt);
2547  }
2548 
2549  return !eof_reached;
2550 }
2551 
2553 {
/* AVCodecContext.get_format callback: scan the decoder's proposed pixel
 * format list and return the first hwaccel format the requested device
 * can be initialized for; the scan stops at the first software format,
 * which becomes the fallback return value.
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is get_format(AVCodecContext *s, const enum
 * AVPixelFormat *pix_fmts); the desc declaration and the hwaccel-init
 * call are also on dropped lines. */
2554  InputStream *ist = s->opaque;
2555  const enum AVPixelFormat *p;
2556  int ret;
2557 
2558  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
2560  const AVCodecHWConfig *config = NULL;
2561  int i;
2562 
/* First non-hwaccel entry terminates the scan: use this sw format. */
2563  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2564  break;
2565 
2566  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2567  ist->hwaccel_id == HWACCEL_AUTO) {
/* Enumerate the codec's hardware configs looking for one that
 * offers this pixel format. */
2568  for (i = 0;; i++) {
2569  config = avcodec_get_hw_config(s->codec, i);
2570  if (!config)
2571  break;
2572  if (!(config->methods &
2574  continue;
2575  if (config->pix_fmt == *p)
2576  break;
2577  }
2578  }
2579  if (config && config->device_type == ist->hwaccel_device_type) {
2581  if (ret < 0) {
/* An explicitly requested hwaccel that fails to init is fatal
 * for this stream; HWACCEL_AUTO just tries the next format. */
2582  if (ist->hwaccel_id == HWACCEL_GENERIC) {
2584  "%s hwaccel requested for input stream #%d:%d, "
2585  "but cannot be initialized.\n",
2586  av_hwdevice_get_type_name(config->device_type),
2587  ist->file_index, ist->st->index);
2588  return AV_PIX_FMT_NONE;
2589  }
2590  continue;
2591  }
2592 
2593  ist->hwaccel_pix_fmt = *p;
2594  break;
2595  }
2596  }
2597 
2598  return *p;
2599 }
2600 
2601 static int init_input_stream(int ist_index, char *error, int error_len)
2602 {
/* Open the decoder for one input stream (when decoding is needed): wire
 * up the get_format callback, apply decoder option defaults, set up the
 * hw device, and call avcodec_open2().  On failure a human-readable
 * message is written into error[error_len] and a negative code returned.
 * NOTE(review): a few original lines are dropped from this extract (the
 * FF_DISABLE/ENABLE_DEPRECATION_WARNINGS wrappers, the attached_pic
 * condition, and the hw_device_setup_for_decode() call). */
2603  int ret;
2604  InputStream *ist = input_streams[ist_index];
2605 
2606  if (ist->decoding_needed) {
2607  const AVCodec *codec = ist->dec;
2608  if (!codec) {
2609  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2610  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2611  return AVERROR(EINVAL);
2612  }
2613 
2614  ist->dec_ctx->opaque = ist;
2615  ist->dec_ctx->get_format = get_format;
2616 #if LIBAVCODEC_VERSION_MAJOR < 60
2618  ist->dec_ctx->thread_safe_callbacks = 1;
2619  })
2620 #endif
2621 
/* DVB subtitles used for both filtering and output need early display
 * times computed (compute_edt), which is only partially supported. */
2622  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2623  (ist->decoding_needed & DECODING_FOR_OST)) {
2624  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2626  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2627  }
2628 
2629  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2630  * audio, and video decoders such as cuvid or mediacodec */
2631  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2632 
2633  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2634  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2635  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
2637  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2638 
2640  if (ret < 0) {
2641  snprintf(error, error_len, "Device setup failed for "
2642  "decoder on input stream #%d:%d : %s",
2643  ist->file_index, ist->st->index, av_err2str(ret));
2644  return ret;
2645  }
2646 
2647  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2648  if (ret == AVERROR_EXPERIMENTAL)
2649  abort_codec_experimental(codec, 0);
2650 
2651  snprintf(error, error_len,
2652  "Error while opening decoder for input stream "
2653  "#%d:%d : %s",
2654  ist->file_index, ist->st->index, av_err2str(ret));
2655  return ret;
2656  }
2658  }
2659 
/* Reset the predicted timestamps; they are recomputed per packet. */
2660  ist->next_pts = AV_NOPTS_VALUE;
2661  ist->next_dts = AV_NOPTS_VALUE;
2662 
2663  return 0;
2664 }
2665 
2667 {
/* Return the input stream feeding this output stream, or NULL when the
 * output has no direct source (source_index < 0).
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is get_input_stream(OutputStream *ost). */
2668  if (ost->source_index >= 0)
2669  return input_streams[ost->source_index];
2670  return NULL;
2671 }
2672 
/**
 * qsort()-style comparator for int64_t values.
 *
 * Returns a negative value, zero, or a positive value when *a is less
 * than, equal to, or greater than *b.  The three-way result is computed
 * with two boolean comparisons (the FFDIFFSIGN idiom written out), which
 * avoids the overflow a plain 64-bit subtraction could hit.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2677 
2679 {
/* Initialize the output stream's bitstream filter chain (if one is set):
 * copy the stream's codec parameters and time base into the BSF, init it,
 * then propagate the filtered parameters and time base back to the
 * stream.  Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is init_output_bsfs(OutputStream *ost). */
2680  AVBSFContext *ctx = ost->bsf_ctx;
2681  int ret;
2682 
2683  if (!ctx)
2684  return 0;
2685 
2686  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
2687  if (ret < 0)
2688  return ret;
2689 
2690  ctx->time_base_in = ost->st->time_base;
2691 
2692  ret = av_bsf_init(ctx);
2693  if (ret < 0) {
2694  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2695  ctx->filter->name);
2696  return ret;
2697  }
2698 
/* The BSF may rewrite codec parameters / time base; adopt its output. */
2699  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2700  if (ret < 0)
2701  return ret;
2702  ost->st->time_base = ctx->time_base_out;
2703 
2704  return 0;
2705 }
2706 
2708 {
/* Set up an output stream in streamcopy mode: derive the destination
 * codec parameters from the input stream's, pick a codec tag valid for
 * the output container, copy frame rate, time base, duration hint, and
 * side data, and apply per-codec-type fixups.
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is init_output_stream_streamcopy(OutputStream *ost);
 * the InputStream *ist declaration and several av_log level arguments
 * are also on dropped lines. */
2709  OutputFile *of = output_files[ost->file_index];
2711  AVCodecParameters *par_dst = ost->st->codecpar;
2712  AVCodecParameters *par_src = ost->ref_par;
2713  AVRational sar;
2714  int i, ret;
2715  uint32_t codec_tag = par_dst->codec_tag;
2716 
2717  av_assert0(ist && !ost->filter);
2718 
/* Round-trip the input parameters through enc_ctx so -c copy private
 * options (encoder_opts) can still be applied. */
2719  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2720  if (ret >= 0)
2721  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2722  if (ret < 0) {
2724  "Error setting up codec context options.\n");
2725  return ret;
2726  }
2727 
2728  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
2729  if (ret < 0) {
2731  "Error getting reference codec parameters.\n");
2732  return ret;
2733  }
2734 
/* Keep the source codec tag only when the output container either has no
 * tag table, maps the tag to the same codec id, or has no tag of its own
 * for this codec. */
2735  if (!codec_tag) {
2736  unsigned int codec_tag_tmp;
2737  if (!of->format->codec_tag ||
2738  av_codec_get_id (of->format->codec_tag, par_src->codec_tag) == par_src->codec_id ||
2739  !av_codec_get_tag2(of->format->codec_tag, par_src->codec_id, &codec_tag_tmp))
2740  codec_tag = par_src->codec_tag;
2741  }
2742 
2743  ret = avcodec_parameters_copy(par_dst, par_src);
2744  if (ret < 0)
2745  return ret;
2746 
2747  par_dst->codec_tag = codec_tag;
2748 
2749  if (!ost->frame_rate.num)
2750  ost->frame_rate = ist->framerate;
2751 
2752  if (ost->frame_rate.num)
2753  ost->st->avg_frame_rate = ost->frame_rate;
2754  else
2755  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2756 
2758  if (ret < 0)
2759  return ret;
2760 
2761  // copy timebase while removing common factors
2762  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
2763  if (ost->frame_rate.num)
2764  ost->st->time_base = av_inv_q(ost->frame_rate);
2765  else
2767  }
2768 
2769  // copy estimated duration as a hint to the muxer
2770  if (ost->st->duration <= 0 && ist->st->duration > 0)
2771  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
2772 
/* Copy all stream-level side data verbatim. */
2773  if (ist->st->nb_side_data) {
2774  for (i = 0; i < ist->st->nb_side_data; i++) {
2775  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2776  uint8_t *dst_data;
2777 
2778  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
2779  if (!dst_data)
2780  return AVERROR(ENOMEM);
2781  memcpy(dst_data, sd_src->data, sd_src->size);
2782  }
2783  }
2784 
/* -metadata rotate override: write a fresh display matrix. */
2785  if (ost->rotate_overridden) {
2787  sizeof(int32_t) * 9);
2788  if (sd)
2789  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
2790  }
2791 
2792  switch (par_dst->codec_type) {
2793  case AVMEDIA_TYPE_AUDIO:
2794  if (audio_volume != 256) {
2795  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2796  exit_program(1);
2797  }
/* Clear bogus block_align values certain MP3/AC3 sources carry. */
2798  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2799  par_dst->block_align= 0;
2800  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2801  par_dst->block_align= 0;
2802  break;
2803  case AVMEDIA_TYPE_VIDEO:
2804  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2805  sar =
2806  av_mul_q(ost->frame_aspect_ratio,
2807  (AVRational){ par_dst->height, par_dst->width });
2808  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2809  "with stream copy may produce invalid files\n");
2810  }
2811  else if (ist->st->sample_aspect_ratio.num)
2812  sar = ist->st->sample_aspect_ratio;
2813  else
2814  sar = par_src->sample_aspect_ratio;
2815  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2816  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2817  ost->st->r_frame_rate = ist->st->r_frame_rate;
2818  break;
2819  }
2820 
2821  ost->mux_timebase = ist->st->time_base;
2822 
2823  return 0;
2824 }
2825 
2827 {
/* Write the "encoder" metadata tag on the output stream: the full
 * LIBAVCODEC_IDENT plus encoder name normally, or the bitexact-safe
 * "Lavc <name>" when either the muxer or the codec runs bitexact.
 * A user-supplied "encoder" tag is left untouched.
 * NOTE(review): the signature line is missing from this extract — in the
 * full source this is set_encoder_id(OutputFile *of, OutputStream *ost). */
2828  const AVDictionaryEntry *e;
2829 
2830  uint8_t *encoder_string;
2831  int encoder_string_len;
2832  int format_flags = 0;
2833  int codec_flags = ost->enc_ctx->flags;
2834 
2835  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2836  return;
2837 
/* Evaluate -fflags / codec -flags option strings to detect bitexact. */
2838  e = av_dict_get(of->opts, "fflags", NULL, 0);
2839  if (e) {
2840  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2841  if (!o)
2842  return;
2843  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2844  }
2845  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2846  if (e) {
2847  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2848  if (!o)
2849  return;
2850  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2851  }
2852 
2853  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2854  encoder_string = av_mallocz(encoder_string_len);
2855  if (!encoder_string)
2856  exit_program(1);
2857 
2858  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
2859  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2860  else
2861  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2862  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2863  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2865 }
2866 
2868  AVCodecContext *avctx)
2869 {
/* Parse the -force_key_frames specification: a comma-separated list of
 * timestamps, where an entry starting with "chapters" expands to every
 * chapter start (plus an optional time offset).  Fills
 * ost->forced_kf_pts (sorted, in avctx->time_base) and
 * ost->forced_kf_count; any allocation failure is fatal.
 * NOTE(review): the first signature line is missing from this extract —
 * in the full source this is parse_forced_key_frames(char *kf,
 * OutputStream *ost, AVCodecContext *avctx). */
2870  char *p;
2871  int n = 1, i, size, index = 0;
2872  int64_t t, *pts;
2873 
/* Count the comma-separated entries to size the initial array. */
2874  for (p = kf; *p; p++)
2875  if (*p == ',')
2876  n++;
2877  size = n;
2878  pts = av_malloc_array(size, sizeof(*pts));
2879  if (!pts) {
2880  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2881  exit_program(1);
2882  }
2883 
2884  p = kf;
2885  for (i = 0; i < n; i++) {
2886  char *next = strchr(p, ',');
2887 
2888  if (next)
2889  *next++ = 0;
2890 
/* "chapters[+offset]": one keyframe per chapter start. */
2891  if (!memcmp(p, "chapters", 8)) {
2892 
2893  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2894  int j;
2895 
/* Grow the array in place; each chapter replaces the one entry. */
2896  if (avf->nb_chapters > INT_MAX - size ||
2897  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2898  sizeof(*pts)))) {
2900  "Could not allocate forced key frames array.\n");
2901  exit_program(1);
2902  }
2903  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2904  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2905 
2906  for (j = 0; j < avf->nb_chapters; j++) {
2907  AVChapter *c = avf->chapters[j];
2908  av_assert1(index < size);
2909  pts[index++] = av_rescale_q(c->start, c->time_base,
2910  avctx->time_base) + t;
2911  }
2912 
2913  } else {
2914 
2915  t = parse_time_or_die("force_key_frames", p, 1);
2916  av_assert1(index < size);
2917  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2918 
2919  }
2920 
2921  p = next;
2922  }
2923 
2924  av_assert0(index == size);
/* Keep the list sorted so the encoder loop can walk it linearly. */
2925  qsort(pts, size, sizeof(*pts), compare_int64);
2926  ost->forced_kf_count = size;
2927  ost->forced_kf_pts = pts;
2928 }
2929 
2930 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
2931 {
/* Choose the encoder time base: an explicit -enc_time_base value wins;
 * a negative value means "copy from the input stream"; otherwise the
 * caller-provided default is used.
 * NOTE(review): one declaration line is dropped from this extract —
 * ist presumably comes from get_input_stream(ost); confirm against the
 * original file. */
2933  AVCodecContext *enc_ctx = ost->enc_ctx;
2934  AVFormatContext *oc;
2935 
2936  if (ost->enc_timebase.num > 0) {
2937  enc_ctx->time_base = ost->enc_timebase;
2938  return;
2939  }
2940 
2941  if (ost->enc_timebase.num < 0) {
2942  if (ist) {
2943  enc_ctx->time_base = ist->st->time_base;
2944  return;
2945  }
2946 
/* No input stream to copy from: warn and fall through to the default. */
2947  oc = output_files[ost->file_index]->ctx;
2948  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
2949  }
2950 
2951  enc_ctx->time_base = default_time_base;
2952 }
2953 
2955 {
/* Configure the encoder context for one output stream from the filter
 * graph's negotiated properties (format, rate, dimensions, SAR, layout),
 * pick the frame rate and time base, apply field-order and forced
 * keyframe settings, and fix per-codec-type details.
 * NOTE(review): the signature and several declarations are on lines
 * dropped from this extract — in the full source this is
 * init_output_stream_encode(OutputStream *ost, AVFrame *frame), and
 * ist/dec_ctx are declared there (ist from get_input_stream(ost),
 * dec_ctx initialized NULL); confirm against the original file. */
2957  AVCodecContext *enc_ctx = ost->enc_ctx;
2959  OutputFile *of = output_files[ost->file_index];
2960  AVFormatContext *oc = of->ctx;
2961  int ret;
2962 
2963  set_encoder_id(output_files[ost->file_index], ost);
2964 
2965  if (ist) {
2966  dec_ctx = ist->dec_ctx;
2967  }
2968 
/* Frame-rate selection: filter graph -> 25fps fallback -> -fpsmax cap ->
 * nearest encoder-supported rate -> MPEG-4 16-bit limit. */
2969  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2970  if (!ost->frame_rate.num)
2971  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2972  if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
2973  ost->frame_rate = (AVRational){25, 1};
2975  "No information "
2976  "about the input framerate is available. Falling "
2977  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2978  "if you want a different framerate.\n",
2979  ost->file_index, ost->index);
2980  }
2981 
2982  if (ost->max_frame_rate.num &&
2983  (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
2984  !ost->frame_rate.den))
2985  ost->frame_rate = ost->max_frame_rate;
2986 
2987  if (ost->enc->supported_framerates && !ost->force_fps) {
2988  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2989  ost->frame_rate = ost->enc->supported_framerates[idx];
2990  }
2991  // reduce frame rate for mpeg4 to be within the spec limits
2992  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2993  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2994  ost->frame_rate.num, ost->frame_rate.den, 65535);
2995  }
2996  }
2997 
2998  switch (enc_ctx->codec_type) {
2999  case AVMEDIA_TYPE_AUDIO:
/* Pull the negotiated audio properties from the buffersink. */
3000  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3001  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3002  ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
3003  if (ret < 0)
3004  return ret;
3005 
3006  if (ost->bits_per_raw_sample)
3007  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3008  else if (dec_ctx && ost->filter->graph->is_meta)
3010  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3011 
3013  break;
3014 
3015  case AVMEDIA_TYPE_VIDEO:
3016  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3017 
3018  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3019  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3020  if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
3021  && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
3022  (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
3023  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3024  "Please consider specifying a lower framerate, a different muxer or "
3025  "setting vsync/fps_mode to vfr\n");
3026  }
3027 
3028  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3029  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3030  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3031  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3032  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3033  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3034 
3035  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3036 
3037  if (ost->bits_per_raw_sample)
3038  enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
3039  else if (dec_ctx && ost->filter->graph->is_meta)
3041  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3042 
/* Seed color metadata from the first frame when available. */
3043  if (frame) {
3044  enc_ctx->color_range = frame->color_range;
3045  enc_ctx->color_primaries = frame->color_primaries;
3046  enc_ctx->color_trc = frame->color_trc;
3047  enc_ctx->colorspace = frame->colorspace;
3048  enc_ctx->chroma_sample_location = frame->chroma_location;
3049  }
3050 
3051  enc_ctx->framerate = ost->frame_rate;
3052 
3053  ost->st->avg_frame_rate = ost->frame_rate;
3054 
3055  // Field order: autodetection
3056  if (frame) {
3058  ost->top_field_first >= 0)
3059  frame->top_field_first = !!ost->top_field_first;
3060 
3061  if (frame->interlaced_frame) {
3062  if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
3063  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
3064  else
3065  enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
3066  } else
3067  enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
3068  }
3069 
3070  // Field order: override
3071  if (ost->top_field_first == 0) {
3072  enc_ctx->field_order = AV_FIELD_BB;
3073  } else if (ost->top_field_first == 1) {
3074  enc_ctx->field_order = AV_FIELD_TT;
3075  }
3076 
/* -force_key_frames: either an expression ("expr:..."), the literal
 * "source" (handled elsewhere), or a static timestamp list. */
3077  if (ost->forced_keyframes) {
3078  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3079  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3081  if (ret < 0) {
3083  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3084  return ret;
3085  }
3086  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3087  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3088  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3089  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3090 
3091  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3092  // parse it only for static kf timings
3093  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3094  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3095  }
3096  }
3097  break;
3098  case AVMEDIA_TYPE_SUBTITLE:
3099  enc_ctx->time_base = AV_TIME_BASE_Q;
3100  if (!enc_ctx->width) {
3101  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3102  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3103  }
3104  break;
3105  case AVMEDIA_TYPE_DATA:
3106  break;
3107  default:
3108  abort();
3109  break;
3110  }
3111 
3112  ost->mux_timebase = enc_ctx->time_base;
3113 
3114  return 0;
3115 }
3116 
3118  char *error, int error_len)
3119 {
/* Fully initialize one output stream: open the encoder (with subtitle
 * header inheritance, hw device setup, subtitle text/bitmap
 * compatibility check, side-data propagation) or configure streamcopy,
 * then initialize bitstream filters and tell the output file the stream
 * is ready.  Failure messages go into error[error_len].
 * NOTE(review): the first signature line is missing from this extract —
 * in the full source this is init_output_stream(OutputStream *ost,
 * AVFrame *frame, char *error, int error_len); several call lines
 * (init_output_stream_encode, hw_device_setup_for_encode,
 * init_output_bsfs) are also dropped. */
3120  int ret = 0;
3121 
3122  if (ost->encoding_needed) {
3123  const AVCodec *codec = ost->enc;
3124  AVCodecContext *dec = NULL;
3125  InputStream *ist;
3126 
3128  if (ret < 0)
3129  return ret;
3130 
/* Inherit the ASS subtitle header from the decoder; the extra byte
 * keeps the buffer NUL-terminated as the ASS code expects. */
3131  if ((ist = get_input_stream(ost)))
3132  dec = ist->dec_ctx;
3133  if (dec && dec->subtitle_header) {
3134  /* ASS code assumes this buffer is null terminated so add extra byte. */
3135  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3136  if (!ost->enc_ctx->subtitle_header)
3137  return AVERROR(ENOMEM);
3138  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3139  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3140  }
3141  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3142  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3143 
3145  if (ret < 0) {
3146  snprintf(error, error_len, "Device setup failed for "
3147  "encoder on output stream #%d:%d : %s",
3148  ost->file_index, ost->index, av_err2str(ret));
3149  return ret;
3150  }
3151 
/* Subtitle transcoding is only supported text->text or bitmap->bitmap. */
3152  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3153  int input_props = 0, output_props = 0;
3154  AVCodecDescriptor const *input_descriptor =
3155  avcodec_descriptor_get(dec->codec_id);
3156  AVCodecDescriptor const *output_descriptor =
3157  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3158  if (input_descriptor)
3159  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3160  if (output_descriptor)
3161  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3162  if (input_props && output_props && input_props != output_props) {
3163  snprintf(error, error_len,
3164  "Subtitle encoding currently only possible from text to text "
3165  "or bitmap to bitmap");
3166  return AVERROR_INVALIDDATA;
3167  }
3168  }
3169 
3170  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3171  if (ret == AVERROR_EXPERIMENTAL)
3172  abort_codec_experimental(codec, 1);
3173  snprintf(error, error_len,
3174  "Error while opening encoder for output stream #%d:%d - "
3175  "maybe incorrect parameters such as bit_rate, rate, width or height",
3176  ost->file_index, ost->index);
3177  return ret;
3178  }
/* Fixed-frame-size audio encoders need the buffersink to deliver
 * exactly frame_size samples per frame. */
3179  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3180  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3181  av_buffersink_set_frame_size(ost->filter->filter,
3182  ost->enc_ctx->frame_size);
3183  assert_avoptions(ost->encoder_opts);
3184  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3185  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3186  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3187  " It takes bits/s as argument, not kbits/s\n");
3188 
3190  if (ret < 0) {
3192  "Error initializing the output stream codec context.\n");
3193  exit_program(1);
3194  }
3195 
/* Publish encoder-produced coded side data on the stream. */
3196  if (ost->enc_ctx->nb_coded_side_data) {
3197  int i;
3198 
3199  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3200  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3201  uint8_t *dst_data;
3202 
3203  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3204  if (!dst_data)
3205  return AVERROR(ENOMEM);
3206  memcpy(dst_data, sd_src->data, sd_src->size);
3207  }
3208  }
3209 
3210  /*
3211  * Add global input side data. For now this is naive, and copies it
3212  * from the input stream's global side data. All side data should
3213  * really be funneled over AVFrame and libavfilter, then added back to
3214  * packet side data, and then potentially using the first packet for
3215  * global side data.
3216  */
3217  if (ist) {
3218  int i;
3219  for (i = 0; i < ist->st->nb_side_data; i++) {
3220  AVPacketSideData *sd = &ist->st->side_data[i];
3221  if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
3222  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3223  if (!dst)
3224  return AVERROR(ENOMEM);
3225  memcpy(dst, sd->data, sd->size);
/* Autorotation already happened in the filter graph: zero the
 * display matrix so players don't rotate twice. */
3226  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3227  av_display_rotation_set((uint32_t *)dst, 0);
3228  }
3229  }
3230  }
3231 
3232  // copy timebase while removing common factors
3233  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3234  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3235 
3236  // copy estimated duration as a hint to the muxer
3237  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3238  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3239  } else if (ost->stream_copy) {
3241  if (ret < 0)
3242  return ret;
3243  }
3244 
3245  /* initialize bitstream filters for the output stream
3246  * needs to be done here, because the codec id for streamcopy is not
3247  * known until now */
3249  if (ret < 0)
3250  return ret;
3251 
3252  ost->initialized = 1;
3253 
3254  ret = of_check_init(output_files[ost->file_index]);
3255  if (ret < 0)
3256  return ret;
3257 
3258  return ret;
3259 }
3260 
3261 static void report_new_stream(int input_index, AVPacket *pkt)
3262 {
3263  InputFile *file = input_files[input_index];
3264  AVStream *st = file->ctx->streams[pkt->stream_index];
3265 
3266  if (pkt->stream_index < file->nb_streams_warn)
3267  return;
3268  av_log(file->ctx, AV_LOG_WARNING,
3269  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3271  input_index, pkt->stream_index,
3273  file->nb_streams_warn = pkt->stream_index + 1;
3274 }
3275 
3276 static int transcode_init(void)
3277 {
3278  int ret = 0, i, j, k;
3279  AVFormatContext *oc;
3280  OutputStream *ost;
3281  InputStream *ist;
3282  char error[1024] = {0};
3283 
3284  for (i = 0; i < nb_filtergraphs; i++) {
3285  FilterGraph *fg = filtergraphs[i];
3286  for (j = 0; j < fg->nb_outputs; j++) {
3287  OutputFilter *ofilter = fg->outputs[j];
3288  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3289  continue;
3290  if (fg->nb_inputs != 1)
3291  continue;
3292  for (k = nb_input_streams-1; k >= 0 ; k--)
3293  if (fg->inputs[0]->ist == input_streams[k])
3294  break;
3295  ofilter->ost->source_index = k;
3296  }
3297  }
3298 
3299  /* init framerate emulation */
3300  for (i = 0; i < nb_input_files; i++) {
3302  if (ifile->readrate || ifile->rate_emu)
3303  for (j = 0; j < ifile->nb_streams; j++)
3304  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3305  }
3306 
3307  /* init input streams */
3308  for (i = 0; i < nb_input_streams; i++)
3309  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3310  for (i = 0; i < nb_output_streams; i++) {
3311  ost = output_streams[i];
3312  avcodec_close(ost->enc_ctx);
3313  }
3314  goto dump_format;
3315  }
3316 
3317  /*
3318  * initialize stream copy and subtitle/data streams.
3319  * Encoded AVFrame based streams will get initialized as follows:
3320  * - when the first AVFrame is received in do_video_out
3321  * - just before the first AVFrame is received in either transcode_step
3322  * or reap_filters due to us requiring the filter chain buffer sink
3323  * to be configured with the correct audio frame size, which is only
3324  * known after the encoder is initialized.
3325  */
3326  for (i = 0; i < nb_output_streams; i++) {
3327  if (!output_streams[i]->stream_copy &&
3328  (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3330  continue;
3331 
3333  if (ret < 0)
3334  goto dump_format;
3335  }
3336 
3337  /* discard unused programs */
3338  for (i = 0; i < nb_input_files; i++) {
3340  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3341  AVProgram *p = ifile->ctx->programs[j];
3342  int discard = AVDISCARD_ALL;
3343 
3344  for (k = 0; k < p->nb_stream_indexes; k++)
3345  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3347  break;
3348  }
3349  p->discard = discard;
3350  }
3351  }
3352 
3353  /* write headers for files with no streams */
3354  for (i = 0; i < nb_output_files; i++) {
3355  oc = output_files[i]->ctx;
3356  if (output_files[i]->format->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3358  if (ret < 0)
3359  goto dump_format;
3360  }
3361  }
3362 
3363  dump_format:
3364  /* dump the stream mapping */
3365  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3366  for (i = 0; i < nb_input_streams; i++) {
3367  ist = input_streams[i];
3368 
3369  for (j = 0; j < ist->nb_filters; j++) {
3370  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3371  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3372  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3373  ist->filters[j]->name);
3374  if (nb_filtergraphs > 1)
3375  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3376  av_log(NULL, AV_LOG_INFO, "\n");
3377  }
3378  }
3379  }
3380 
3381  for (i = 0; i < nb_output_streams; i++) {
3382  ost = output_streams[i];
3383 
3384  if (ost->attachment_filename) {
3385  /* an attached file */
3386  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3387  ost->attachment_filename, ost->file_index, ost->index);
3388  continue;
3389  }
3390 
3391  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3392  /* output from a complex graph */
3393  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3394  if (nb_filtergraphs > 1)
3395  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3396 
3397  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3398  ost->index, ost->enc ? ost->enc->name : "?");
3399  continue;
3400  }
3401 
3402  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3403  input_streams[ost->source_index]->file_index,
3404  input_streams[ost->source_index]->st->index,
3405  ost->file_index,
3406  ost->index);
3407  if (ost->sync_ist != input_streams[ost->source_index])
3408  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3409  ost->sync_ist->file_index,
3410  ost->sync_ist->st->index);
3411  if (ost->stream_copy)
3412  av_log(NULL, AV_LOG_INFO, " (copy)");
3413  else {
3414  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3415  const AVCodec *out_codec = ost->enc;
3416  const char *decoder_name = "?";
3417  const char *in_codec_name = "?";
3418  const char *encoder_name = "?";
3419  const char *out_codec_name = "?";
3420  const AVCodecDescriptor *desc;
3421 
3422  if (in_codec) {
3423  decoder_name = in_codec->name;
3424  desc = avcodec_descriptor_get(in_codec->id);
3425  if (desc)
3426  in_codec_name = desc->name;
3427  if (!strcmp(decoder_name, in_codec_name))
3428  decoder_name = "native";
3429  }
3430 
3431  if (out_codec) {
3432  encoder_name = out_codec->name;
3433  desc = avcodec_descriptor_get(out_codec->id);
3434  if (desc)
3435  out_codec_name = desc->name;
3436  if (!strcmp(encoder_name, out_codec_name))
3437  encoder_name = "native";
3438  }
3439 
3440  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3441  in_codec_name, decoder_name,
3442  out_codec_name, encoder_name);
3443  }
3444  av_log(NULL, AV_LOG_INFO, "\n");
3445  }
3446 
3447  if (ret) {
3448  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3449  return ret;
3450  }
3451 
3453 
3454  return 0;
3455 }
3456 
3457 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3458 static int need_output(void)
3459 {
3460  int i;
3461 
3462  for (i = 0; i < nb_output_streams; i++) {
3464  OutputFile *of = output_files[ost->file_index];
3465  AVFormatContext *os = output_files[ost->file_index]->ctx;
3466 
3467  if (ost->finished ||
3468  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3469  continue;
3470  if (ost->frame_number >= ost->max_frames) {
3471  int j;
3472  for (j = 0; j < of->ctx->nb_streams; j++)
3474  continue;
3475  }
3476 
3477  return 1;
3478  }
3479 
3480  return 0;
3481 }
3482 
3483 /**
3484  * Select the output stream to process.
3485  *
3486  * @return selected output stream, or NULL if none available
3487  */
3489 {
3490  int i;
3491  int64_t opts_min = INT64_MAX;
3492  OutputStream *ost_min = NULL;
3493 
3494  for (i = 0; i < nb_output_streams; i++) {
3496  int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3497  av_rescale_q(ost->last_mux_dts, ost->st->time_base,
3498  AV_TIME_BASE_Q);
3499  if (ost->last_mux_dts == AV_NOPTS_VALUE)
3501  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3502  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3503 
3504  if (!ost->initialized && !ost->inputs_done)
3505  return ost->unavailable ? NULL : ost;
3506 
3507  if (!ost->finished && opts < opts_min) {
3508  opts_min = opts;
3509  ost_min = ost->unavailable ? NULL : ost;
3510  }
3511  }
3512  return ost_min;
3513 }
3514 
/* Enable or disable terminal echo on stdin; a no-op on platforms without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    /* Only touch the terminal if its attributes can actually be queried
     * (stdin may be a pipe or redirected file). */
    if (tcgetattr(0, &tty) == 0) {
        if (on)
            tty.c_lflag |= ECHO;
        else
            tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
3526 
3527 static int check_keyboard_interaction(int64_t cur_time)
3528 {
3529  int i, ret, key;
3530  static int64_t last_time;
3531  if (received_nb_signals)
3532  return AVERROR_EXIT;
3533  /* read_key() returns 0 on EOF */
3534  if (cur_time - last_time >= 100000) {
3535  key = read_key();
3536  last_time = cur_time;
3537  }else
3538  key = -1;
3539  if (key == 'q') {
3540  av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
3541  return AVERROR_EXIT;
3542  }
3543  if (key == '+') av_log_set_level(av_log_get_level()+10);
3544  if (key == '-') av_log_set_level(av_log_get_level()-10);
3545  if (key == 's') qp_hist ^= 1;
3546  if (key == 'h'){
3547  if (do_hex_dump){
3548  do_hex_dump = do_pkt_dump = 0;
3549  } else if(do_pkt_dump){
3550  do_hex_dump = 1;
3551  } else
3552  do_pkt_dump = 1;
3554  }
3555  if (key == 'c' || key == 'C'){
3556  char buf[4096], target[64], command[256], arg[256] = {0};
3557  double time;
3558  int k, n = 0;
3559  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3560  i = 0;
3561  set_tty_echo(1);
3562  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3563  if (k > 0)
3564  buf[i++] = k;
3565  buf[i] = 0;
3566  set_tty_echo(0);
3567  fprintf(stderr, "\n");
3568  if (k > 0 &&
3569  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3570  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3571  target, time, command, arg);
3572  for (i = 0; i < nb_filtergraphs; i++) {
3573  FilterGraph *fg = filtergraphs[i];
3574  if (fg->graph) {
3575  if (time < 0) {
3576  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3577  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3578  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3579  } else if (key == 'c') {
3580  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3582  } else {
3583  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3584  if (ret < 0)
3585  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3586  }
3587  }
3588  }
3589  } else {
3591  "Parse error, at least 3 arguments were expected, "
3592  "only %d given in string '%s'\n", n, buf);
3593  }
3594  }
3595  if (key == 'd' || key == 'D'){
3596  int debug=0;
3597  if(key == 'D') {
3598  debug = input_streams[0]->dec_ctx->debug << 1;
3599  if(!debug) debug = 1;
3600  while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
3601  debug += debug;
3602  }else{
3603  char buf[32];
3604  int k = 0;
3605  i = 0;
3606  set_tty_echo(1);
3607  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3608  if (k > 0)
3609  buf[i++] = k;
3610  buf[i] = 0;
3611  set_tty_echo(0);
3612  fprintf(stderr, "\n");
3613  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3614  fprintf(stderr,"error parsing debug value\n");
3615  }
3616  for(i=0;i<nb_input_streams;i++) {
3617  input_streams[i]->dec_ctx->debug = debug;
3618  }
3619  for(i=0;i<nb_output_streams;i++) {
3621  ost->enc_ctx->debug = debug;
3622  }
3623  if(debug) av_log_set_level(AV_LOG_DEBUG);
3624  fprintf(stderr,"debug=%d\n", debug);
3625  }
3626  if (key == '?'){
3627  fprintf(stderr, "key function\n"
3628  "? show this help\n"
3629  "+ increase verbosity\n"
3630  "- decrease verbosity\n"
3631  "c Send command to first matching filter supporting it\n"
3632  "C Send/Queue command to all matching filters\n"
3633  "D cycle through available debug modes\n"
3634  "h dump packets/hex press to cycle through the 3 states\n"
3635  "q quit\n"
3636  "s Show QP histogram\n"
3637  );
3638  }
3639  return 0;
3640 }
3641 
3642 #if HAVE_THREADS
3643 static void *input_thread(void *arg)
3644 {
3645  InputFile *f = arg;
3646  AVPacket *pkt = f->pkt, *queue_pkt;
3647  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3648  int ret = 0;
3649 
3650  while (1) {
3651  ret = av_read_frame(f->ctx, pkt);
3652 
3653  if (ret == AVERROR(EAGAIN)) {
3654  av_usleep(10000);
3655  continue;
3656  }
3657  if (ret < 0) {
3658  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3659  break;
3660  }
3661  queue_pkt = av_packet_alloc();
3662  if (!queue_pkt) {
3664  av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
3665  break;
3666  }
3667  av_packet_move_ref(queue_pkt, pkt);
3668  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3669  if (flags && ret == AVERROR(EAGAIN)) {
3670  flags = 0;
3671  ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
3672  av_log(f->ctx, AV_LOG_WARNING,
3673  "Thread message queue blocking; consider raising the "
3674  "thread_queue_size option (current value: %d)\n",
3675  f->thread_queue_size);
3676  }
3677  if (ret < 0) {
3678  if (ret != AVERROR_EOF)
3679  av_log(f->ctx, AV_LOG_ERROR,
3680  "Unable to send packet to main thread: %s\n",
3681  av_err2str(ret));
3682  av_packet_free(&queue_pkt);
3683  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3684  break;
3685  }
3686  }
3687 
3688  return NULL;
3689 }
3690 
3691 static void free_input_thread(int i)
3692 {
3693  InputFile *f = input_files[i];
3694  AVPacket *pkt;
3695 
3696  if (!f || !f->in_thread_queue)
3697  return;
3699  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3700  av_packet_free(&pkt);
3701 
3702  pthread_join(f->thread, NULL);
3703  f->joined = 1;
3704  av_thread_message_queue_free(&f->in_thread_queue);
3705 }
3706 
3707 static void free_input_threads(void)
3708 {
3709  int i;
3710 
3711  for (i = 0; i < nb_input_files; i++)
3712  free_input_thread(i);
3713 }
3714 
3715 static int init_input_thread(int i)
3716 {
3717  int ret;
3718  InputFile *f = input_files[i];
3719 
3720  if (f->thread_queue_size < 0)
3721  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
3722  if (!f->thread_queue_size)
3723  return 0;
3724 
3725  if (f->ctx->pb ? !f->ctx->pb->seekable :
3726  strcmp(f->ctx->iformat->name, "lavfi"))
3727  f->non_blocking = 1;
3728  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3729  f->thread_queue_size, sizeof(f->pkt));
3730  if (ret < 0)
3731  return ret;
3732 
3733  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3734  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3735  av_thread_message_queue_free(&f->in_thread_queue);
3736  return AVERROR(ret);
3737  }
3738 
3739  return 0;
3740 }
3741 
3742 static int init_input_threads(void)
3743 {
3744  int i, ret;
3745 
3746  for (i = 0; i < nb_input_files; i++) {
3747  ret = init_input_thread(i);
3748  if (ret < 0)
3749  return ret;
3750  }
3751  return 0;
3752 }
3753 
3754 static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
3755 {
3756  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3757  f->non_blocking ?
3759 }
3760 #endif
3761 
3763 {
3764  if (f->readrate || f->rate_emu) {
3765  int i;
3766  int64_t file_start = copy_ts * (
3767  (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
3768  (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
3769  );
3770  float scale = f->rate_emu ? 1.0 : f->readrate;
3771  for (i = 0; i < f->nb_streams; i++) {
3772  InputStream *ist = input_streams[f->ist_index + i];
3773  int64_t stream_ts_offset, pts, now;
3774  if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
3775  stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
3776  pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3777  now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
3778  if (pts > now)
3779  return AVERROR(EAGAIN);
3780  }
3781  }
3782 
3783 #if HAVE_THREADS
3784  if (f->thread_queue_size)
3785  return get_input_packet_mt(f, pkt);
3786 #endif
3787  *pkt = f->pkt;
3788  return av_read_frame(f->ctx, *pkt);
3789 }
3790 
3791 static int got_eagain(void)
3792 {
3793  int i;
3794  for (i = 0; i < nb_output_streams; i++)
3795  if (output_streams[i]->unavailable)
3796  return 1;
3797  return 0;
3798 }
3799 
3800 static void reset_eagain(void)
3801 {
3802  int i;
3803  for (i = 0; i < nb_input_files; i++)
3804  input_files[i]->eagain = 0;
3805  for (i = 0; i < nb_output_streams; i++)
3806  output_streams[i]->unavailable = 0;
3807 }
3808 
3809 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3810 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3811  AVRational time_base)
3812 {
3813  int ret;
3814 
3815  if (!*duration) {
3816  *duration = tmp;
3817  return tmp_time_base;
3818  }
3819 
3820  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3821  if (ret < 0) {
3822  *duration = tmp;
3823  return tmp_time_base;
3824  }
3825 
3826  return time_base;
3827 }
3828 
3830 {
3831  InputStream *ist;
3832  AVCodecContext *avctx;
3833  int i, ret, has_audio = 0;
3834  int64_t duration = 0;
3835 
3836  ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
3837  if (ret < 0)
3838  return ret;
3839 
3840  for (i = 0; i < ifile->nb_streams; i++) {
3841  ist = input_streams[ifile->ist_index + i];
3842  avctx = ist->dec_ctx;
3843 
3844  /* duration is the length of the last frame in a stream
3845  * when audio stream is present we don't care about
3846  * last video frame length because it's not defined exactly */
3847  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3848  has_audio = 1;
3849  }
3850 
3851  for (i = 0; i < ifile->nb_streams; i++) {
3852  ist = input_streams[ifile->ist_index + i];
3853  avctx = ist->dec_ctx;
3854 
3855  if (has_audio) {
3856  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3857  AVRational sample_rate = {1, avctx->sample_rate};
3858 
3860  } else {
3861  continue;
3862  }
3863  } else {
3864  if (ist->framerate.num) {
3865  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
3866  } else if (ist->st->avg_frame_rate.num) {
3868  } else {
3869  duration = 1;
3870  }
3871  }
3872  if (!ifile->duration)
3873  ifile->time_base = ist->st->time_base;
3874  /* the total duration of the stream, max_pts - min_pts is
3875  * the duration of the stream without the last frame */
3876  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
3877  duration += ist->max_pts - ist->min_pts;
3878  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3879  ifile->time_base);
3880  }
3881 
3882  if (ifile->loop > 0)
3883  ifile->loop--;
3884 
3885  return ret;
3886 }
3887 
3888 /*
3889  * Return
3890  * - 0 -- one packet was read and processed
3891  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3892  * this function should be called again
3893  * - AVERROR_EOF -- this function should not be called again
3894  */
3895 static int process_input(int file_index)
3896 {
3897  InputFile *ifile = input_files[file_index];
3899  InputStream *ist;
3900  AVPacket *pkt;
3901  int ret, thread_ret, i, j;
3902  int64_t duration;
3903  int64_t pkt_dts;
3904  int disable_discontinuity_correction = copy_ts;
3905 
3906  is = ifile->ctx;
3908 
3909  if (ret == AVERROR(EAGAIN)) {
3910  ifile->eagain = 1;
3911  return ret;
3912  }
3913  if (ret < 0 && ifile->loop) {
3914  AVCodecContext *avctx;
3915  for (i = 0; i < ifile->nb_streams; i++) {
3916  ist = input_streams[ifile->ist_index + i];
3917  avctx = ist->dec_ctx;
3918  if (ist->processing_needed) {
3919  ret = process_input_packet(ist, NULL, 1);
3920  if (ret>0)
3921  return 0;
3922  if (ist->decoding_needed)
3923  avcodec_flush_buffers(avctx);
3924  }
3925  }
3926 #if HAVE_THREADS
3927  free_input_thread(file_index);
3928 #endif
3929  ret = seek_to_start(ifile, is);
3930 #if HAVE_THREADS
3931  thread_ret = init_input_thread(file_index);
3932  if (thread_ret < 0)
3933  return thread_ret;
3934 #endif
3935  if (ret < 0)
3936  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
3937  else
3939  if (ret == AVERROR(EAGAIN)) {
3940  ifile->eagain = 1;
3941  return ret;
3942  }
3943  }
3944  if (ret < 0) {
3945  if (ret != AVERROR_EOF) {
3946  print_error(is->url, ret);
3947  if (exit_on_error)
3948  exit_program(1);
3949  }
3950 
3951  for (i = 0; i < ifile->nb_streams; i++) {
3952  ist = input_streams[ifile->ist_index + i];
3953  if (ist->processing_needed) {
3954  ret = process_input_packet(ist, NULL, 0);
3955  if (ret>0)
3956  return 0;
3957  }
3958 
3959  /* mark all outputs that don't go through lavfi as finished */
3960  for (j = 0; j < nb_output_streams; j++) {
3962 
3963  if (ost->source_index == ifile->ist_index + i &&
3964  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3966  }
3967  }
3968 
3969  ifile->eof_reached = 1;
3970  return AVERROR(EAGAIN);
3971  }
3972 
3973  reset_eagain();
3974 
3975  if (do_pkt_dump) {
3977  is->streams[pkt->stream_index]);
3978  }
3979  /* the following test is needed in case new streams appear
3980  dynamically in stream : we ignore them */
3981  if (pkt->stream_index >= ifile->nb_streams) {
3982  report_new_stream(file_index, pkt);
3983  goto discard_packet;
3984  }
3985 
3986  ist = input_streams[ifile->ist_index + pkt->stream_index];
3987 
3988  ist->data_size += pkt->size;
3989  ist->nb_packets++;
3990 
3991  if (ist->discard)
3992  goto discard_packet;
3993 
3994  if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
3996  "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
3997  if (exit_on_error)
3998  exit_program(1);
3999  }
4000 
4001  if (debug_ts) {
4002  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4003  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
4012  }
4013 
4014  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4015  int64_t stime, stime2;
4016  // Correcting starttime based on the enabled streams
4017  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4018  // so we instead do it here as part of discontinuity handling
4019  if ( ist->next_dts == AV_NOPTS_VALUE
4020  && ifile->ts_offset == -is->start_time
4021  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4022  int64_t new_start_time = INT64_MAX;
4023  for (i=0; i<is->nb_streams; i++) {
4024  AVStream *st = is->streams[i];
4025  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4026  continue;
4027  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4028  }
4029  if (new_start_time > is->start_time) {
4030  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4031  ifile->ts_offset = -new_start_time;
4032  }
4033  }
4034 
4035  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4036  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4037  ist->wrap_correction_done = 1;
4038 
4039  if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4040  pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
4041  ist->wrap_correction_done = 0;
4042  }
4043  if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4044  pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
4045  ist->wrap_correction_done = 0;
4046  }
4047  }
4048 
4049  /* add the stream-global side data to the first packet */
4050  if (ist->nb_packets == 1) {
4051  for (i = 0; i < ist->st->nb_side_data; i++) {
4052  AVPacketSideData *src_sd = &ist->st->side_data[i];
4053  uint8_t *dst_data;
4054 
4055  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4056  continue;
4057 
4058  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
4059  continue;
4060 
4061  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
4062  if (!dst_data)
4063  exit_program(1);
4064 
4065  memcpy(dst_data, src_sd->data, src_sd->size);
4066  }
4067  }
4068 
4069  if (pkt->dts != AV_NOPTS_VALUE)
4070  pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4071  if (pkt->pts != AV_NOPTS_VALUE)
4072  pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4073 
4074  if (pkt->pts != AV_NOPTS_VALUE)
4075  pkt->pts *= ist->ts_scale;
4076  if (pkt->dts != AV_NOPTS_VALUE)
4077  pkt->dts *= ist->ts_scale;
4078 
4080  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4082  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4083  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4084  int64_t delta = pkt_dts - ifile->last_ts;
4085  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4087  ifile->ts_offset -= delta;
4089  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4090  delta, ifile->ts_offset);
4092  if (pkt->pts != AV_NOPTS_VALUE)
4094  }
4095  }
4096 
4097  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4098  if (pkt->pts != AV_NOPTS_VALUE) {
4099  pkt->pts += duration;
4100  ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
4101  ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
4102  }
4103 
4104  if (pkt->dts != AV_NOPTS_VALUE)
4105  pkt->dts += duration;
4106 
4108 
4109  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4110  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4111  int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
4112  ist->st->time_base, AV_TIME_BASE_Q,
4114  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4115  disable_discontinuity_correction = 0;
4116  }
4117 
4118  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4120  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4121  !disable_discontinuity_correction) {
4122  int64_t delta = pkt_dts - ist->next_dts;
4123  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4124  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4126  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4127  ifile->ts_offset -= delta;
4129  "timestamp discontinuity for stream #%d:%d "
4130  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4131  ist->file_index, ist->st->index, ist->st->id,
4133  delta, ifile->ts_offset);
4135  if (pkt->pts != AV_NOPTS_VALUE)
4137  }
4138  } else {
4139  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4141  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
4142  pkt->dts = AV_NOPTS_VALUE;
4143  }
4144  if (pkt->pts != AV_NOPTS_VALUE){
4145  int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
4146  delta = pkt_pts - ist->next_dts;
4147  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4149  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
4150  pkt->pts = AV_NOPTS_VALUE;
4151  }
4152  }
4153  }
4154  }
4155 
4156  if (pkt->dts != AV_NOPTS_VALUE)
4157  ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
4158 
4159  if (debug_ts) {
4160  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
4167  }
4168 
4169  sub2video_heartbeat(ist, pkt->pts);
4170 
4171  process_input_packet(ist, pkt, 0);
4172 
4173 discard_packet:
4174 #if HAVE_THREADS
4175  if (ifile->thread_queue_size)
4176  av_packet_free(&pkt);
4177  else
4178 #endif
4180 
4181  return 0;
4182 }
4183 
4184 /**
4185  * Perform a step of transcoding for the specified filter graph.
4186  *
4187  * @param[in] graph filter graph to consider
4188  * @param[out] best_ist input stream where a frame would allow to continue
4189  * @return 0 for success, <0 for error
4190  */
4191 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4192 {
4193  int i, ret;
4194  int nb_requests, nb_requests_max = 0;
4195  InputFilter *ifilter;
4196  InputStream *ist;
4197 
4198  *best_ist = NULL;
4200  if (ret >= 0)
4201  return reap_filters(0);
4202 
4203  if (ret == AVERROR_EOF) {
4204  ret = reap_filters(1);
4205  for (i = 0; i < graph->nb_outputs; i++)
4206  close_output_stream(graph->outputs[i]->ost);
4207  return ret;
4208  }
4209  if (ret != AVERROR(EAGAIN))
4210  return ret;
4211 
4212  for (i = 0; i < graph->nb_inputs; i++) {
4213  ifilter = graph->inputs[i];
4214  ist = ifilter->ist;
4215  if (input_files[ist->file_index]->eagain ||
4217  continue;
4218  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4219  if (nb_requests > nb_requests_max) {
4220  nb_requests_max = nb_requests;
4221  *best_ist = ist;
4222  }
4223  }
4224 
4225  if (!*best_ist)
4226  for (i = 0; i < graph->nb_outputs; i++)
4227  graph->outputs[i]->ost->unavailable = 1;
4228 
4229  return 0;
4230 }
4231 
4232 /**
4233  * Run a single step of transcoding.
4234  *
4235  * @return 0 for success, <0 for error
4236  */
4237 static int transcode_step(void)
4238 {
4239  OutputStream *ost;
4240  InputStream *ist = NULL;
4241  int ret;
4242 
4243  ost = choose_output();
4244  if (!ost) {
4245  if (got_eagain()) {
4246  reset_eagain();
4247  av_usleep(10000);
4248  return 0;
4249  }
4250  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4251  return AVERROR_EOF;
4252  }
4253 
4254  if (ost->filter && !ost->filter->graph->graph) {
4255  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4256  ret = configure_filtergraph(ost->filter->graph);
4257  if (ret < 0) {
4258  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4259  return ret;
4260  }
4261  }
4262  }
4263 
4264  if (ost->filter && ost->filter->graph->graph) {
4265  /*
4266  * Similar case to the early audio initialization in reap_filters.
4267  * Audio is special in ffmpeg.c currently as we depend on lavfi's
4268  * audio frame buffering/creation to get the output audio frame size
4269  * in samples correct. The audio frame size for the filter chain is
4270  * configured during the output stream initialization.
4271  *
4272  * Apparently avfilter_graph_request_oldest (called in
4273  * transcode_from_filter just down the line) peeks. Peeking already
4274  * puts one frame "ready to be given out", which means that any
4275  * update in filter buffer sink configuration afterwards will not
4276  * help us. And yes, even if it would be utilized,
4277  * av_buffersink_get_samples is affected, as it internally utilizes
4278  * the same early exit for peeked frames.
4279  *
4280  * In other words, if avfilter_graph_request_oldest would not make
4281  * further filter chain configuration or usage of
4282  * av_buffersink_get_samples useless (by just causing the return
4283  * of the peeked AVFrame as-is), we could get rid of this additional
4284  * early encoder initialization.
4285  */
4286  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4288 
4289  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4290  return ret;
4291  if (!ist)
4292  return 0;
4293  } else if (ost->filter) {
4294  int i;
4295  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4296  InputFilter *ifilter = ost->filter->graph->inputs[i];
4297  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4298  ist = ifilter->ist;
4299  break;
4300  }
4301  }
4302  if (!ist) {
4303  ost->inputs_done = 1;
4304  return 0;
4305  }
4306  } else {
4307  av_assert0(ost->source_index >= 0);
4308  ist = input_streams[ost->source_index];
4309  }
4310 
4311  ret = process_input(ist->file_index);
4312  if (ret == AVERROR(EAGAIN)) {
4313  if (input_files[ist->file_index]->eagain)
4314  ost->unavailable = 1;
4315  return 0;
4316  }
4317 
4318  if (ret < 0)
4319  return ret == AVERROR_EOF ? 0 : ret;
4320 
4321  return reap_filters(0);
4322 }
4323 
4324 /*
4325  * The following code is the main loop of the file converter
4326  */
4327 static int transcode(void)
4328 {
4329  int ret, i;
4330  AVFormatContext *os;
4331  OutputStream *ost;
4332  InputStream *ist;
4333  int64_t timer_start;
4334  int64_t total_packets_written = 0;
4335 
4336  ret = transcode_init();
4337  if (ret < 0)
4338  goto fail;
4339 
4340  if (stdin_interaction) {
4341  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4342  }
4343 
4344  timer_start = av_gettime_relative();
4345 
4346 #if HAVE_THREADS
4347  if ((ret = init_input_threads()) < 0)
4348  goto fail;
4349 #endif
4350 
4351  while (!received_sigterm) {
4352  int64_t cur_time= av_gettime_relative();
4353 
4354  /* if 'q' pressed, exits */
4355  if (stdin_interaction)
4356  if (check_keyboard_interaction(cur_time) < 0)
4357  break;
4358 
4359  /* check if there's any stream where output is still needed */
4360  if (!need_output()) {
4361  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4362  break;
4363  }
4364 
4365  ret = transcode_step();
4366  if (ret < 0 && ret != AVERROR_EOF) {
4367  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4368  break;
4369  }
4370 
4371  /* dump report by using the output first video and audio streams */
4372  print_report(0, timer_start, cur_time);
4373  }
4374 #if HAVE_THREADS
4375  free_input_threads();
4376 #endif
4377 
4378  /* at the end of stream, we must flush the decoder buffers */
4379  for (i = 0; i < nb_input_streams; i++) {
4380  ist = input_streams[i];
4381  if (!input_files[ist->file_index]->eof_reached) {
4382  process_input_packet(ist, NULL, 0);
4383  }
4384  }
4385  flush_encoders();
4386 
4387  term_exit();
4388 
4389  /* write the trailer if needed */
4390  for (i = 0; i < nb_output_files; i++) {
4392  if (ret < 0 && exit_on_error)
4393  exit_program(1);
4394  }
4395 
4396  /* dump report by using the first video and audio streams */
4397  print_report(1, timer_start, av_gettime_relative());
4398 
4399  /* close the output files */
4400  for (i = 0; i < nb_output_files; i++) {
4401  os = output_files[i]->ctx;
4402  if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
4403  if ((ret = avio_closep(&os->pb)) < 0) {
4404  av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
4405  if (exit_on_error)
4406  exit_program(1);
4407  }
4408  }
4409  }
4410 
4411  /* close each encoder */
4412  for (i = 0; i < nb_output_streams; i++) {
4413  ost = output_streams[i];
4414  if (ost->encoding_needed) {
4415  av_freep(&ost->enc_ctx->stats_in);
4416  }
4417  total_packets_written += ost->packets_written;
4418  if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
4419  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4420  exit_program(1);
4421  }
4422  }
4423 
4424  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4425  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4426  exit_program(1);
4427  }
4428 
4429  /* close each decoder */
4430  for (i = 0; i < nb_input_streams; i++) {
4431  ist = input_streams[i];
4432  if (ist->decoding_needed) {
4433  avcodec_close(ist->dec_ctx);
4434  if (ist->hwaccel_uninit)
4435  ist->hwaccel_uninit(ist->dec_ctx);
4436  }
4437  }
4438 
4440 
4441  /* finished ! */
4442  ret = 0;
4443 
4444  fail:
4445 #if HAVE_THREADS
4446  free_input_threads();
4447 #endif
4448 
4449  if (output_streams) {
4450  for (i = 0; i < nb_output_streams; i++) {
4451  ost = output_streams[i];
4452  if (ost) {
4453  if (ost->logfile) {
4454  if (fclose(ost->logfile))
4456  "Error closing logfile, loss of information possible: %s\n",
4457  av_err2str(AVERROR(errno)));
4458  ost->logfile = NULL;
4459  }
4460  av_freep(&ost->forced_kf_pts);
4461  av_freep(&ost->apad);
4463  av_dict_free(&ost->encoder_opts);
4464  av_dict_free(&ost->sws_dict);
4465  av_dict_free(&ost->swr_opts);
4466  }
4467  }
4468  }
4469  return ret;
4470 }
4471 
4473 {
4474  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4475 #if HAVE_GETRUSAGE
4476  struct rusage rusage;
4477 
4478  getrusage(RUSAGE_SELF, &rusage);
4479  time_stamps.user_usec =
4480  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4481  time_stamps.sys_usec =
4482  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4483 #elif HAVE_GETPROCESSTIMES
4484  HANDLE proc;
4485  FILETIME c, e, k, u;
4486  proc = GetCurrentProcess();
4487  GetProcessTimes(proc, &c, &e, &k, &u);
4488  time_stamps.user_usec =
4489  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4490  time_stamps.sys_usec =
4491  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4492 #else
4493  time_stamps.user_usec = time_stamps.sys_usec = 0;
4494 #endif
4495  return time_stamps;
4496 }
4497 
/**
 * Report the peak memory usage of this process, in bytes.
 *
 * Uses getrusage() where available, the Win32 process memory counters
 * otherwise, and 0 on platforms that offer neither.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kibibytes; scale to bytes */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    /* no way to query memory consumption on this platform */
    return 0;
#endif
}
4515 
4516 int main(int argc, char **argv)
4517 {
4518  int i, ret;
4520 
4521  init_dynload();
4522 
4524 
4525  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4526 
4528  parse_loglevel(argc, argv, options);
4529 
4530 #if CONFIG_AVDEVICE
4532 #endif
4534 
4535  show_banner(argc, argv, options);
4536 
4537  /* parse options and open all input/output files */
4538  ret = ffmpeg_parse_options(argc, argv);
4539  if (ret < 0)
4540  exit_program(1);
4541 
4542  if (nb_output_files <= 0 && nb_input_files == 0) {
4543  show_usage();
4544  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4545  exit_program(1);
4546  }
4547 
4548  /* file converter / grab */
4549  if (nb_output_files <= 0) {
4550  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4551  exit_program(1);
4552  }
4553 
4554  for (i = 0; i < nb_output_files; i++) {
4555  if (strcmp(output_files[i]->format->name, "rtp"))
4556  want_sdp = 0;
4557  }
4558 
4560  if (transcode() < 0)
4561  exit_program(1);
4562  if (do_benchmark) {
4563  int64_t utime, stime, rtime;
4565  utime = current_time.user_usec - ti.user_usec;
4566  stime = current_time.sys_usec - ti.sys_usec;
4567  rtime = current_time.real_usec - ti.real_usec;
4569  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4570  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4571  }
4572  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4575  exit_program(69);
4576 
4578  return main_return_code;
4579 }
error
static void error(const char *err)
Definition: target_bsf_fuzzer.c:31
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
AVSubtitle
Definition: avcodec.h:2305
avcodec_close
av_cold int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: avcodec.c:440
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:146
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1026
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:255
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:422
AVCodec
AVCodec.
Definition: codec.h:196
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:318
pthread_join
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:749
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:241
need_output
static int need_output(void)
Definition: ffmpeg.c:3458
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:160
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1794
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:832
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:74
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: codec_id.h:430
ifilter_parameters_from_codecpar
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1717
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:125
av_clip
#define av_clip
Definition: common.h:95
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:442
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:3800
InputStream::hwaccel_device
char * hwaccel_device
Definition: ffmpeg.h:379
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:390
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:54
seek_to_start
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:3829
r
const char * r
Definition: vf_curves.c:116
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:150
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:993
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:345
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
Callback to negotiate the pixel format.
Definition: avcodec.h:653
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:57
forced_keyframes_const_names
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:115
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:43
AVCodecContext::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:966
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
InputFilter::width
int width
Definition: ffmpeg.h:254
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1142
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
InputFilter::displaymatrix
int32_t * displaymatrix
Definition: ffmpeg.h:261
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:254
init_output_stream
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
Definition: ffmpeg.c:3117
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:998
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:441
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:684
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:292
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1431
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:53
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:391
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
OutputStream::enc_ctx
AVCodecContext * enc_ctx
Definition: ffmpeg.h:479
sub
static float sub(float src0, float src1)
Definition: dnn_backend_native_layer_mathbinary.c:31
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:91
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2662
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:484
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:300
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:879
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:1010
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:146
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:280
InputFile::nb_streams_warn
int nb_streams_warn
Definition: ffmpeg.h:422
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: codec_par.c:99
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:658
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:162
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:316
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4237
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:126
AVSubtitleRect
Definition: avcodec.h:2277
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2309
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:62
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:111
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:595
InputFilter::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg.h:258
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:325
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AV_THREAD_MESSAGE_NONBLOCK
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1281
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:432
AVCodecContext::color_trc
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:959
AVFrame::width
int width
Definition: frame.h:397
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:533
AVPacketSideData
Definition: packet.h:315
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: codec.h:215
w
uint8_t w
Definition: llviddspenc.c:38
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:816
FKF_T
@ FKF_T
Definition: ffmpeg.h:443
AVPacket::data
uint8_t * data
Definition: packet.h:374
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:57
finish_output_stream
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1294
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:995
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:58
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:482
dup_warning
static uint64_t dup_warning
Definition: ffmpeg.c:135
AVOption
AVOption.
Definition: opt.h:251
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:34
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:159
FilterGraph::index
int index
Definition: ffmpeg.h:291
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:1028
nb_output_dumped
unsigned nb_output_dumped
Definition: ffmpeg.c:138
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:372
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AVFrame::pkt_duration
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:613
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:68
avcodec_parameters_free
void avcodec_parameters_free(AVCodecParameters **ppar)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: codec_par.c:63
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
version.h
transcode
static int transcode(void)
Definition: ffmpeg.c:4327
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:127
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:143
AV_NOWARN_DEPRECATED
#define AV_NOWARN_DEPRECATED(code)
Disable warnings about deprecated features This is useful for sections of code kept for backward comp...
Definition: attributes.h:126
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:3499
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:974
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:301
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure clockwise rotation by the specified angle (in de...
Definition: display.c:51
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:392
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:65
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:571
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:253
check_recording_time
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:751
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1438
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2088
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:111
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:311
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:300
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:287
av_bsf_free
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:53
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1735
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:352
ost
static AVStream * ost
Definition: vaapi_transcode.c:45
tf_sess_config.config
config
Definition: tf_sess_config.py:33
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:429
sample_rate
sample_rate
Definition: ffmpeg_filter.c:153
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:174
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:73
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:328
AVBSFContext
The bitstream filter state.
Definition: bsf.h:68
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:2954
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:157
want_sdp
int want_sdp
Definition: ffmpeg.c:140
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:59
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:119
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:637
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:244
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:406
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:93
InputStream
Definition: ffmpeg.h:306
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:177
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:1732
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:170
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:368
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:3791
AVPacketSideData::size
size_t size
Definition: packet.h:317
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2017
stats_period
int64_t stats_period
Definition: ffmpeg_opt.c:181
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:793
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:204
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:156
fifo.h
AV_FIELD_TT
@ AV_FIELD_TT
Definition: codec_par.h:40
finish
static void finish(void)
Definition: movenc.c:342
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:179
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:398
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:2056
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:131
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:247
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:65
samplefmt.h
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:346
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1140
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:505
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:333
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:417
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:469
AVChapter
Definition: avformat.h:1172
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:393
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:704
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
scale
static av_always_inline float scale(float x, float s)
Definition: vf_v360.c:1389
pts
static int64_t pts
Definition: transcode_aac.c:654
av_thread_message_queue_recv
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
Definition: threadmessage.c:174
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:317
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:428
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:998
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
AV_FIELD_TB
@ AV_FIELD_TB
Definition: codec_par.h:42
OutputFile::opts
AVDictionary * opts
Definition: ffmpeg.h:592
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:361
loop
static int loop
Definition: ffplay.c:340
do_pkt_dump
int do_pkt_dump
Definition: ffmpeg_opt.c:166
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:557
InputFile
Definition: ffmpeg.h:404
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:266
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: macros.h:45
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2707
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:418
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:72
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: codec_id.h:528
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:514
InputStream::first_dts
int64_t first_dts
dts of the first packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:326
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:386
OutputFile::shortest
int shortest
Definition: ffmpeg.h:598
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:327
AVCodecContext::color_primaries
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:952
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:448
AV_PKT_FLAG_CORRUPT
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:430
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:158
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3488
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:125
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1766
float
float
Definition: af_crystalizer.c:122
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:64
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:685
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:55
input_streams
InputStream ** input_streams
Definition: ffmpeg.c:147
llrintf
#define llrintf(x)
Definition: libm.h:399
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: codec_desc.h:38
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:340
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:256
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:455
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4472
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:302
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:152
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:1926
copy_ts_first_pts
static int64_t copy_ts_first_pts
Definition: ffmpeg.c:347
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:702
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:347
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:225
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:137
AVFormatContext::chapters
AVChapter ** chapters
Definition: avformat.h:1432
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:106
AVDictionaryEntry::key
char * key
Definition: dict.h:80
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:453
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:127
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:521
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
InputFilter
Definition: ffmpeg.h:242
get_input_stream
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2666
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:639
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:407
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1811
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:296
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:268
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1448
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
AVPacketSideData::data
uint8_t * data
Definition: packet.h:316
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:454
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:371
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:464
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:176
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2311
of_write_packet
void of_write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg_mux.c:44
abort_codec_experimental
static void abort_codec_experimental(const AVCodec *c, int encoder)
Definition: ffmpeg.c:676
on
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:336
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:535
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:93
compare_int64
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2673
InputStream::hwaccel_retrieve_data
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:385
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: codec_id.h:494
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:245
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1397
key
const char * key
Definition: hwcontext_opencl.c:174
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
AV_FIELD_BT
@ AV_FIELD_BT
Definition: codec_par.h:43
NAN
#define NAN
Definition: mathematics.h:64
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:667
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: codec_id.h:77
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2364
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:907
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:3895
init_output_stream_wrapper
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
Definition: ffmpeg.c:811
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:399
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1771
arg
const char * arg
Definition: jacosubdec.c:67
pthread_create
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:64
avio_flush
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:252
AVCodecParserContext::repeat_pict
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:2807
output_streams
OutputStream ** output_streams
Definition: ffmpeg.c:152
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4191
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:54
InputStream::pts
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:330
AVFormatContext
Format I/O context.
Definition: avformat.h:1213
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:437
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:32
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:77
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1108
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:150
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1390
nb_frames_drop
static int64_t nb_frames_drop
Definition: ffmpeg.c:136
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:270
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:978
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:606
NULL
#define NULL
Definition: coverity.c:32
InputStream::sub2video::w
int w
Definition: ffmpeg.h:365
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:64
AVCodecContext::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:973
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:348
InputStream::st
AVStream * st
Definition: ffmpeg.h:308
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:231
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4516
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:681
AVCodec::type
enum AVMediaType type
Definition: codec.h:209
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2070
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2148
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
of_close
void of_close(OutputFile **pof)
Definition: ffmpeg_mux.c:302
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4498
InputStream::next_pts
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:329
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: packet.h:318
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3527
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1255
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:447
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: codec_desc.h:97
parseutils.h
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:930
InputStream::hwaccel_id
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:377
mathops.h
duration_max
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:3810
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1141
main_return_code
int main_return_code
Definition: ffmpeg.c:346
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:113
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:960
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:1019
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:263
InputStream::fix_sub_duration
int fix_sub_duration
Definition: ffmpeg.h:353
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:134
double
double
Definition: af_crystalizer.c:132
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3276
AVCodecContext::thread_safe_callbacks
attribute_deprecated int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:1502
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1294
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2552
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:74
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:115
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
time.h
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:343
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:164
nb_input_streams
int nb_input_streams
Definition: ffmpeg.c:148
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:430
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:212
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:479
InputStream::min_pts
int64_t min_pts
Definition: ffmpeg.h:335
InputStream::sub2video::sub_queue
AVFifo * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:363
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:303
swresample.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
InputStream::sub2video::initialize
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:366
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:177
input_files
InputFile ** input_files
Definition: ffmpeg.c:149
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:630
InputStream::frames_decoded
uint64_t frames_decoded
Definition: ffmpeg.h:395
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:325
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:598
FilterGraph
Definition: ffmpeg.h:290
print_stats
int print_stats
Definition: ffmpeg_opt.c:173
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1269
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:243
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:529
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:28
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:507
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:512
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:259
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1328
of_check_init
int of_check_init(OutputFile *of)
Definition: ffmpeg_mux.c:230
options
const OptionDef options[]
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1256
f
f
Definition: af_crystalizer.c:122
AVIOContext
Bytestream IO Context.
Definition: avio.h:162
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:422
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
InputStream::hwaccel_device_type
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:378
AVMediaType
AVMediaType
Definition: avutil.h:199
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:318
AVPacket::size
int size
Definition: packet.h:375
InputStream::wrap_correction_done
int wrap_correction_done
Definition: ffmpeg.h:331
InputStream::start
int64_t start
Definition: ffmpeg.h:322
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:49
threadmessage.h
InputStream::file_index
int file_index
Definition: ffmpeg.h:307
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
Definition: ffmpeg.c:1078
output_files
OutputFile ** output_files
Definition: ffmpeg.c:154
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:203
SIGNAL
#define SIGNAL(sig, func)
Definition: ffmpeg.c:403
parse_forced_key_frames
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2867
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:461
update_video_stats
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
Definition: ffmpeg.c:837
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:342
start_time
static int64_t start_time
Definition: ffplay.c:331
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:294
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1296
InputStream::pkt
AVPacket * pkt
Definition: ffmpeg.h:319
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1014
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2348
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:1746
InputStream::got_output
int got_output
Definition: ffmpeg.h:355
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:64
size
int size
Definition: twinvq_data.h:10344
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:167
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
subtitle_out
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:169
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1214
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:657
InputStream::prev_sub
struct InputStream::@2 prev_sub
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
InputStream::hwaccel_retrieved_pix_fmt
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:387
AVFrameSideData::data
uint8_t * data
Definition: frame.h:233
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:542
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: avformat.c:691
OutputStream::source_index
int source_index
Definition: ffmpeg.h:460
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:312
sub2video_update
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:470
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:412
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:1017
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2308
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
AVFMT_NOSTREAMS
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:486
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:373
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:232
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:241
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:379
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
InputStream::samples_decoded
uint64_t samples_decoded
Definition: ffmpeg.h:396
OutputFile::limit_filesize
uint64_t limit_filesize
Definition: ffmpeg.h:596
InputStream::max_pts
int64_t max_pts
Definition: ffmpeg.h:336
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:380
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:62
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:163
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:526
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2260
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
nb_frames_dup
static int64_t nb_frames_dup
Definition: ffmpeg.c:134
av_thread_message_queue_alloc
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:42
decode
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2047
AVStream::side_data
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:1057
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:210
guess_input_channel_layout
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1888
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:994
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
avcodec_parameters_copy
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: codec_par.c:74
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:88
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:447
AV_FIELD_BB
@ AV_FIELD_BB
Definition: codec_par.h:41
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:63
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:57
InputStream::guess_layout_max
int guess_layout_max
Definition: ffmpeg.h:349
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:576
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1288
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:442
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:405
bprint.h
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:313
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:367
InputStream::ret
int ret
Definition: ffmpeg.h:356
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:559
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, size_t *size)
Get side information from packet.
Definition: avpacket.c:251
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:312
internal.h
AVCodecParameters::height
int height
Definition: codec_par.h:128
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: codec_par.c:182
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: codec_par.h:184
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:178
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:53
src2
const pixel * src2
Definition: h264pred_template.c:422
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
display.h
av_thread_message_queue_set_err_send
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
Definition: threadmessage.c:190
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:171
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:350
OutputFile::ost_index
int ost_index
Definition: ffmpeg.h:593
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:430
InputStream::hwaccel_uninit
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:384
InputStream::processing_needed
int processing_needed
Definition: ffmpeg.h:314
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:405
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:506
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:477
av_get_audio_frame_duration
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:814
transcode_subtitles
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2273
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:264
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1137
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVCodec::name
const char * name
Name of the codec implementation.
Definition: codec.h:203
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AV_PKT_DATA_CPB_PROPERTIES
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:146
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:980
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:512
AVCodecContext::height
int height
Definition: avcodec.h:562
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:357
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:599
nb_output_files
int nb_output_files
Definition: ffmpeg.c:155
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:468
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:1008
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:95
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:137
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:962
AVFrame::decode_error_flags
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:629
AVFMT_FLAG_BITEXACT
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1348
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:948
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:172
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:377
output_packet
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:725
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:246
AVFormatContext::oformat
const struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1232
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:96
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:56
normalize.ifile
ifile
Definition: normalize.py:6
encode_frame
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:892
OutputFilter::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg.h:281
AVStream::nb_side_data
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:1061
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
AVCodecContext::opaque
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:431
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:374
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:288
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:407
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:764
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:427
InputFile::ist_index
int ist_index
Definition: ffmpeg.h:408
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:370
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:67
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:92
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:257
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:569
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1169
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:389
AVFrame::height
int height
Definition: frame.h:397
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:956
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:344
BenchmarkTimeStamps
Definition: ffmpeg.c:124
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: avformat.c:633
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:445
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
InputFilter::format
int format
Definition: ffmpeg.h:252
report_new_stream
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3261
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, size_t size)
Allocate new information of a packet.
Definition: avpacket.c:230
InputStream::prev_pkt_pts
int64_t prev_pkt_pts
Definition: ffmpeg.h:321
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
av_stream_get_end_pts
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: mux_utils.c:32
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:1907
avfilter.h
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:105
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1097
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:110
InputStream::nb_samples
int64_t nb_samples
Definition: ffmpeg.h:342
InputFilter::height
int height
Definition: ffmpeg.h:254
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:776
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:376
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:879
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:416
InputStream::discard
int discard
Definition: ffmpeg.h:309
AVFilterContext
An instance of a filter.
Definition: avfilter.h:408
AVCodecContext::debug
int debug
debug
Definition: avcodec.h:1322
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1507
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:121
VSYNC_AUTO
@ VSYNC_AUTO
Definition: ffmpeg.h:51
OutputFilter
Definition: ffmpeg.h:266
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:364
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:397
desc
const char * desc
Definition: libsvtav1.c:83
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:291
start_at_zero
int start_at_zero
Definition: ffmpeg_opt.c:168
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:483
audio_volume
int audio_volume
Definition: ffmpeg_opt.c:159
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:278
OutputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:591
OutputFilter::out_tmp
AVFilterInOut * out_tmp
Definition: ffmpeg.h:273
AVCodecParameters::video_delay
int video_delay
Video only.
Definition: codec_par.h:156
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: codec_par.h:39
InputStream::sub2video::h
int h
Definition: ffmpeg.h:365
llrint
#define llrint(x)
Definition: libm.h:394
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2826
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:231
AVCodecParameters::format
int format
Definition: codec_par.h:85
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:260
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
Definition: ffmpeg.c:1937
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:440
AVDictionaryEntry
Definition: dict.h:79
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:362
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:175
do_hex_dump
int do_hex_dump
Definition: ffmpeg_opt.c:165
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:61
InputStream::ts_scale
double ts_scale
Definition: ffmpeg.h:344
AVPacket
This structure stores compressed data.
Definition: packet.h:351
av_thread_message_queue_free
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:93
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
init_input_stream
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2601
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:394
get_input_packet
static int get_input_packet(InputFile *f, AVPacket **pkt)
Definition: ffmpeg.c:3762
InputStream::dts_buffer
int64_t * dts_buffer
Definition: ffmpeg.h:398
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *st, enum AVPacketSideDataType type, size_t size)
Allocate new information from stream.
Definition: avformat.c:190
InputFilter::frame_queue
AVFifo * frame_queue
Definition: ffmpeg.h:249
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:158
d
d
Definition: ffmpeg_filter.c:153
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:562
int32_t
int32_t
Definition: audioconvert.c:56
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:122
imgutils.h
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: packet.h:133
timestamp.h
OutputStream
Definition: muxing.c:54
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:561
hwcontext.h
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
av_thread_message_queue_set_err_recv
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
Definition: threadmessage.c:201
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:86
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1184
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
codec_flags
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:39
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:157
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_stream_get_parser
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: demux_utils.c:32
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: codec.h:350
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
av_pkt_dump_log2
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:116
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3559
OutputFile::format
const AVOutputFormat * format
Definition: ffmpeg.h:589
InputStream::nb_dts_buffer
int nb_dts_buffer
Definition: ffmpeg.h:399
InputStream::saw_first_ts
int saw_first_ts
Definition: ffmpeg.h:345
AVDictionaryEntry::value
char * value
Definition: dict.h:81
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:988
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3515
avstring.h
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:420
FKF_N
@ FKF_N
Definition: ffmpeg.h:439
AVStream::pts_wrap_bits
int pts_wrap_bits
Number of bits in timestamps.
Definition: avformat.h:1117
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:594
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
InputStream::dec
const AVCodec * dec
Definition: ffmpeg.h:317
snprintf
#define snprintf
Definition: snprintf.h:34
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:447
read_key
static int read_key(void)
Definition: ffmpeg.c:456
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1313
buffersrc.h
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:357
of_write_trailer
int of_write_trailer(OutputFile *of)
Definition: ffmpeg_mux.c:281
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:759
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
init_output_bsfs
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2678
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1201
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:2930
dec_ctx
static AVCodecContext * dec_ctx
Definition: filtering_audio.c:44
nb_output_streams
int nb_output_streams
Definition: ffmpeg.c:153
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:237
OutputFile
Definition: ffmpeg.h:586
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:351
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:64