FFmpeg
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity consumed by cmdutils (banner / -version output). */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Handle for -vstats output; closed (with error check) in ffmpeg_cleanup(). */
112 static FILE *vstats_file;
113 
/* Variable names usable inside -force_key_frames expressions; NULL-terminated. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Wall-clock / user / system CPU sample for -benchmark reporting.
 * NOTE(review): the closing "} BenchmarkTimeStamps;" and several following
 * declarations are missing from this extract (the embedded numbering jumps
 * from 126 to 128); consult the full source before editing this region. */
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Runtime statistics and configuration flags shared across the transcode loop. */
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
/* Threshold at which a "More than N frames duplicated" warning is emitted;
 * grows by 10x each time it fires (see do_video_out()). */
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
/* NOTE(review): declarations originally on lines 142-158 are missing from
 * this extract (numbering jumps 141→144→146→151→156→159). */
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* sub2video_get_blank_frame(): size ist->sub2video.frame from the decoder
 * dimensions (falling back to the configured sub2video canvas size), allocate
 * its buffer and clear the first plane to a fully transparent canvas.
 * Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
 * NOTE(review): the signature line (176) and lines 181/184 (presumably
 * av_frame_unref() and the pixel-format assignment) are missing from this
 * extract — confirm against the full source. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* Clear plane 0 so uncovered areas stay transparent when rects are blitted. */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas (with the given pts) into every filter
 * this input stream feeds, keeping AV_BUFFERSRC_FLAG_KEEP_REF semantics so the
 * same frame can be re-sent on the next heartbeat.
 * NOTE(review): lines 231-233 — including the av_buffersrc_add_frame_flags()
 * call that sets `ret` — are missing from this extract. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
/* Remember the pts so heartbeats that are not ahead of it can be skipped. */
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* sub2video_update(): render the rectangles of `sub` onto a fresh blank
 * canvas and push it into the filtergraphs; with sub == NULL, push an empty
 * (clearing) canvas that lasts until further notice.
 * NOTE(review): the signature line (240, InputStream *ist / AVSubtitle *sub)
 * and line 262 (the av_log call opening) are missing from this extract. */
241 {
242  AVFrame *frame = ist->sub2video.frame;
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
/* Convert display window from ms offsets on sub->pts into stream timebase. */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
/* Clearing frame: starts where the previous subtitle ended, lasts forever. */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
/* Flush sub2video at end of stream: push a final clearing canvas if one is
 * still pending, then signal EOF to every attached buffer source.
 * NOTE(review): line 312 — presumably the av_buffersrc_add_frame(..., NULL)
 * call that sets `ret` — is missing from this extract. */
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init(), but only if term_init() actually changed them. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (!restore_tty)
        return;
    tcsetattr(0, TCSANOW, &oldtty);
#endif
}
327 
/* Public terminal teardown: silence further logging, then restore the tty.
 * NOTE(review): line 331 — the call to term_exit_sigsafe() — is missing from
 * this extract; confirm against the full source. */
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
/* Signal-handler communication flags (written from sigterm_handler()).
 * NOTE(review): line 336 — presumably the received_nb_signals companion
 * counter or the atomic transcode_init_done flag — is missing here. */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
/* Set once cleanup has finished; polled by the Windows CtrlHandler. */
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
/* sigterm_handler(int sig): record the signal for the main loop; after more
 * than 3 signals, write a message with async-signal-safe write() and hard-exit.
 * NOTE(review): line 341 (the "sigterm_handler(int sig)" signature tail) and
 * lines 345-346 (incrementing received_nb_signals / term_exit_sigsafe()) are
 * missing from this extract. */
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
/* write(), not av_log(): only async-signal-safe calls are allowed here. */
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Install signal handlers and (on POSIX ttys) put the terminal into raw-ish
 * mode so read_key() can grab single keypresses; saves the old attributes in
 * `oldtty` for term_exit_sigsafe() to restore.
 * NOTE(review): line 390 — presumably the "if (!run_as_daemon && stdin_interaction)"
 * guard that opens the brace closed on line 408 — is missing from this extract. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
393  oldtty = tty;
394  restore_tty = 1;
395 
/* Disable input translation, canonical mode and echo; keep output processing. */
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
/* Blocking single-byte reads: return after 1 byte, no timeout. */
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll for a pending keypress without blocking.
 * Returns the key as an unsigned char value, -1 when nothing is available,
 * or (on the termios path) the read() result when the read fails. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set readable;
    int rc;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    /* Zero timeout: select() only reports input that is ready right now. */
    timeout.tv_sec  = 0;
    timeout.tv_usec = 0;
    rc = select(1, &readable, NULL, NULL, &timeout);
    if (rc > 0) {
        rc = read(0, &ch, 1);
        if (rc == 1)
            return ch;

        return rc;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* Lazily classify stdin: console handle vs. redirected pipe. */
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if (nchars == 0)
            return -1;
        read(0, &ch, 1);
        return ch;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
/* AVIOInterruptCB callback: non-zero return aborts blocking I/O in lavf.
 * NOTE(review): the body line (478) is missing from this extract; in the
 * full source it returns whether shutdown was requested — confirm there. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown, registered via register_exit(): frees filtergraphs and
 * their queued frames/subtitles, closes output files, releases per-stream
 * encoder/decoder state, joins input threads and closes the vstats file.
 * `ret` is the pending exit code (used only for the final log message).
 * NOTE(review): many lines are missing from this extract (embedded numbering
 * jumps, e.g. 493→495, 589→592, 615→617) — fifo read calls, several free
 * calls and the AVFormatContext declaration among them. Do not treat this
 * body as complete. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
/* Drain and free every filtergraph, including queued frames and subtitles. */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
499  sizeof(frame), NULL);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
528  }
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
/* Only close the AVIO context for formats that actually own a file. */
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
546  }
/* Release all per-output-stream encoder state. */
547  for (i = 0; i < nb_output_streams; i++) {
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
557  av_frame_free(&ost->filtered_frame);
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
562  av_expr_free(ost->forced_keyframes_pexpr);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
566  av_freep(&ost->audio_channels_map);
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570  av_dict_free(&ost->swr_opts);
571 
572  avcodec_free_context(&ost->enc_ctx);
573  avcodec_parameters_free(&ost->ref_par);
574 
/* Packets still buffered before the muxer header was written must be freed. */
575  if (ost->muxing_queue) {
576  while (av_fifo_size(ost->muxing_queue)) {
577  AVPacket pkt;
578  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
580  }
581  av_fifo_freep(&ost->muxing_queue);
582  }
583 
585  }
586 #if HAVE_THREADS
587  free_input_threads();
588 #endif
589  for (i = 0; i < nb_input_files; i++) {
592  }
593  for (i = 0; i < nb_input_streams; i++) {
594  InputStream *ist = input_streams[i];
595 
598  av_dict_free(&ist->decoder_opts);
601  av_freep(&ist->filters);
602  av_freep(&ist->hwaccel_device);
603  av_freep(&ist->dts_buffer);
604 
606 
608  }
609 
610  if (vstats_file) {
/* fclose() is checked: a failed close can silently drop buffered stats. */
611  if (fclose(vstats_file))
613  "Error closing vstats file, loss of information possible: %s\n",
614  av_err2str(AVERROR(errno)));
615  }
617 
622 
623  uninit_opts();
624 
626 
627  if (received_sigterm) {
628  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629  (int) received_sigterm);
630  } else if (ret && atomic_load(&transcode_init_done)) {
631  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
632  }
633  term_exit();
/* Let the Windows CtrlHandler stop stalling the process. */
634  ffmpeg_exited = 1;
635 }
636 
/* remove_avoptions(AVDictionary **a, AVDictionary *b): drop from *a every key
 * that also appears in b (i.e. options already consumed elsewhere).
 * NOTE(review): the signature line (637) and the av_dict_set(..., NULL, ...)
 * body line (642) are missing from this extract. */
638 {
639  AVDictionaryEntry *t = NULL;
640 
641  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
643  }
644 }
645 
/* assert_avoptions(AVDictionary *m): abort with a fatal error if any option
 * in m was never consumed by a codec/format.
 * NOTE(review): the signature line (646) and the AVDictionaryEntry *t
 * declaration (648) are missing from this extract. */
647 {
649  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
651  exit_program(1);
652  }
653 }
654 
/* Terminate after an experimental codec was selected without
 * -strict experimental; the caller logs the details, this only exits.
 * (`c` and `encoder` are accepted for the caller's convenience but unused.) */
655 static void abort_codec_experimental(AVCodec *c, int encoder)
656 {
657  exit_program(1);
658 }
659 
/* With -benchmark_all: log the user/sys/real time elapsed since the previous
 * call, labelled by the printf-style `fmt` arguments, then reset the baseline.
 * A NULL fmt only resets the baseline without logging.
 * NOTE(review): lines 663 (the BenchmarkTimeStamps t = get_benchmark_...()
 * sample) and 670-675 (the av_log call and elapsed-time arithmetic) are
 * missing from this extract. */
660 static void update_benchmark(const char *fmt, ...)
661 {
662  if (do_benchmark_all) {
664  va_list va;
665  char buf[1024];
666 
667  if (fmt) {
668  va_start(va, fmt);
669  vsnprintf(buf, sizeof(buf), fmt, va);
670  va_end(va);
672  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
676  }
/* New baseline for the next measurement interval. */
677  current_time = t;
678  }
679 }
680 
/* close_all_output_streams(): mark `ost` with `this_stream` and every other
 * output stream with `others` in their `finished` bitmask.
 * NOTE(review): the signature line (681, taking OutputStream *ost and two
 * OSTFinished values) is missing from this extract. */
682 {
683  int i;
684  for (i = 0; i < nb_output_streams; i++) {
685  OutputStream *ost2 = output_streams[i];
686  ost2->finished |= ost == ost2 ? this_stream : others;
687  }
688 }
689 
/* Hand one packet to the muxer for `ost`: enforces -frames limits, buffers
 * packets while the muxer header is not yet written, fixes up timestamps
 * (DTS<=PTS, monotonic DTS unless AVFMT_TS_NONSTRICT), harvests encoder
 * quality/error side data for video, and finally interleave-writes the packet.
 * `unqueue` is non-zero when the packet is being replayed from the muxing
 * queue and must not be counted a second time.
 * NOTE(review): several lines are missing from this extract (numbering jumps
 * at 706, 719, 728, 736-737, 742, 776, 804, 810-811, 816, 820, 822) — among
 * them the av_packet_ref() into tmp_pkt, the quality side-data lookup and the
 * av_interleaved_write_frame() call itself. Treat this body as incomplete. */
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 {
692  AVFormatContext *s = of->ctx;
693  AVStream *st = ost->st;
694  int ret;
695 
696  /*
697  * Audio encoders may split the packets -- #frames in != #packets out.
698  * But there is no reordering, so we can limit the number of output packets
699  * by simply dropping them here.
700  * Counting encoded video frames needs to be done separately because of
701  * reordering, see do_video_out().
702  * Do not count the packet when unqueued because it has been counted when queued.
703  */
704  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
705  if (ost->frame_number >= ost->max_frames) {
707  return;
708  }
709  ost->frame_number++;
710  }
711 
712  if (!of->header_written) {
713  AVPacket tmp_pkt = {0};
714  /* the muxer is not initialized yet, buffer the packet */
715  if (!av_fifo_space(ost->muxing_queue)) {
/* Grow the queue geometrically but never past max_muxing_queue_size. */
716  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717  ost->max_muxing_queue_size);
718  if (new_size <= av_fifo_size(ost->muxing_queue)) {
720  "Too many packets buffered for output stream %d:%d.\n",
721  ost->file_index, ost->st->index);
722  exit_program(1);
723  }
724  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
725  if (ret < 0)
726  exit_program(1);
727  }
729  if (ret < 0)
730  exit_program(1);
731  av_packet_move_ref(&tmp_pkt, pkt);
732  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
733  return;
734  }
735 
738  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 
740  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741  int i;
743  NULL);
/* Encoder quality/picture-type/error stats live in packet side data. */
744  ost->quality = sd ? AV_RL32(sd) : -1;
745  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 
747  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748  if (sd && i < sd[5])
749  ost->error[i] = AV_RL64(sd + 8 + 8*i);
750  else
751  ost->error[i] = -1;
752  }
753 
754  if (ost->frame_rate.num && ost->is_cfr) {
755  if (pkt->duration > 0)
756  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
758  ost->mux_timebase);
759  }
760  }
761 
762  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 
764  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
765  if (pkt->dts != AV_NOPTS_VALUE &&
766  pkt->pts != AV_NOPTS_VALUE &&
767  pkt->dts > pkt->pts) {
768  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769  pkt->dts, pkt->pts,
770  ost->file_index, ost->st->index);
/* Replace both stamps by the median of {pts, dts, last_dts+1}. */
771  pkt->pts =
772  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775  }
777  pkt->dts != AV_NOPTS_VALUE &&
778  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779  ost->last_mux_dts != AV_NOPTS_VALUE) {
780  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781  if (pkt->dts < max) {
782  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783  av_log(s, loglevel, "Non-monotonous DTS in output stream "
784  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786  if (exit_on_error) {
787  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
788  exit_program(1);
789  }
790  av_log(s, loglevel, "changing to %"PRId64". This may result "
791  "in incorrect timestamps in the output file.\n",
792  max);
793  if (pkt->pts >= pkt->dts)
794  pkt->pts = FFMAX(pkt->pts, max);
795  pkt->dts = max;
796  }
797  }
798  }
799  ost->last_mux_dts = pkt->dts;
800 
801  ost->data_size += pkt->size;
802  ost->packets_written++;
803 
805 
806  if (debug_ts) {
807  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
809  av_get_media_type_string(ost->enc_ctx->codec_type),
812  pkt->size
813  );
814  }
815 
817  if (ret < 0) {
818  print_error("av_interleaved_write_frame()", ret);
819  main_return_code = 1;
821  }
823 }
824 
/* close_output_stream(OutputStream *ost): flag the encoder as finished; with
 * -shortest, propagate this stream's end time as the file recording limit.
 * NOTE(review): the signature line (825) and line 832 (presumably storing
 * `end` into of->recording_time via FFMIN) are missing from this extract. */
826 {
827  OutputFile *of = output_files[ost->file_index];
828 
829  ost->finished |= ENCODER_FINISHED;
830  if (of->shortest) {
831  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
833  }
834 }
835 
836 /*
837  * Send a single packet to the output, applying any bitstream filters
838  * associated with the output stream. This may result in any number
839  * of packets actually being written, depending on what bitstream
840  * filters are applied. The supplied packet is consumed and will be
841  * blank (as if newly-allocated) when this function returns.
842  *
843  * If eof is set, instead indicate EOF to all bitstream filters and
844  * therefore flush any delayed packets to the output. A blank packet
845  * must be supplied in this case.
846  */
/* output_packet(): run `pkt` through the stream's bitstream-filter chain (if
 * any) and write every resulting packet; with eof set, flush the chain.
 * See the contract in the comment block above (lines 836-846).
 * NOTE(review): the first signature line (847, "static void output_packet(
 * OutputFile *of, AVPacket *pkt,") is missing from this extract. */
848  OutputStream *ost, int eof)
849 {
850  int ret = 0;
851 
852  /* apply the output bitstream filters, if any */
853  if (ost->nb_bitstream_filters) {
854  int idx;
855 
856  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
857  if (ret < 0)
858  goto finish;
859 
/* idx tracks the deepest filter we are currently draining; 0 means done. */
860  eof = 0;
861  idx = 1;
862  while (idx) {
863  /* get a packet from the previous filter up the chain */
864  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
865  if (ret == AVERROR(EAGAIN)) {
866  ret = 0;
867  idx--;
868  continue;
869  } else if (ret == AVERROR_EOF) {
870  eof = 1;
871  } else if (ret < 0)
872  goto finish;
873 
874  /* send it to the next filter down the chain or to the muxer */
875  if (idx < ost->nb_bitstream_filters) {
876  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
877  if (ret < 0)
878  goto finish;
879  idx++;
880  eof = 0;
881  } else if (eof)
882  goto finish;
883  else
884  write_packet(of, pkt, ost, 0);
885  }
886  } else if (!eof)
887  write_packet(of, pkt, ost, 0);
888 
889 finish:
890  if (ret < 0 && ret != AVERROR_EOF) {
891  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
893  if(exit_on_error)
894  exit_program(1);
895  }
896 }
897 
/* check_recording_time(OutputStream *ost): return 0 (and close the stream)
 * once the stream has reached the output file's -t recording limit, 1 while
 * encoding may continue.
 * NOTE(review): the signature line (898) and line 905 (the
 * close_output_stream(ost) call) are missing from this extract. */
899 {
900  OutputFile *of = output_files[ost->file_index];
901 
902  if (of->recording_time != INT64_MAX &&
903  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
904  AV_TIME_BASE_Q) >= 0) {
906  return 0;
907  }
908  return 1;
909 }
910 
/* do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame): feed one
 * audio frame to the encoder and emit every packet it produces, keeping
 * ost->sync_opts advanced by the sample count for A/V sync accounting.
 * NOTE(review): several lines are missing from this extract — the first
 * signature line (911), the av_init_packet() call (918), the
 * check_recording_time() guard (922), adjust_frame_pts_to_encoder_tb-style
 * lines (932) and two av_ts2str argument lines (958-959). */
912  AVFrame *frame)
913 {
914  AVCodecContext *enc = ost->enc_ctx;
915  AVPacket pkt;
916  int ret;
917 
919  pkt.data = NULL;
920  pkt.size = 0;
921 
923  return;
924 
/* Without a usable pts (or with -async < 0), stamp from the running counter. */
925  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926  frame->pts = ost->sync_opts;
927  ost->sync_opts = frame->pts + frame->nb_samples;
928  ost->samples_encoded += frame->nb_samples;
929  ost->frames_encoded++;
930 
931  av_assert0(pkt.size || !pkt.data);
933  if (debug_ts) {
934  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937  enc->time_base.num, enc->time_base.den);
938  }
939 
940  ret = avcodec_send_frame(enc, frame);
941  if (ret < 0)
942  goto error;
943 
/* Drain all packets the encoder has ready (send/receive API). */
944  while (1) {
945  ret = avcodec_receive_packet(enc, &pkt);
946  if (ret == AVERROR(EAGAIN))
947  break;
948  if (ret < 0)
949  goto error;
950 
951  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 
953  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
954 
955  if (debug_ts) {
956  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
960  }
961 
962  output_packet(of, &pkt, ost, 0);
963  }
964 
965  return;
966 error:
967  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
968  exit_program(1);
969 }
970 
971 static void do_subtitle_out(OutputFile *of,
972  OutputStream *ost,
973  AVSubtitle *sub)
974 {
975  int subtitle_out_max_size = 1024 * 1024;
976  int subtitle_out_size, nb, i;
977  AVCodecContext *enc;
978  AVPacket pkt;
979  int64_t pts;
980 
981  if (sub->pts == AV_NOPTS_VALUE) {
982  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
983  if (exit_on_error)
984  exit_program(1);
985  return;
986  }
987 
988  enc = ost->enc_ctx;
989 
990  if (!subtitle_out) {
991  subtitle_out = av_malloc(subtitle_out_max_size);
992  if (!subtitle_out) {
993  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
994  exit_program(1);
995  }
996  }
997 
998  /* Note: DVB subtitle need one packet to draw them and one other
999  packet to clear them */
1000  /* XXX: signal it in the codec context ? */
1001  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1002  nb = 2;
1003  else
1004  nb = 1;
1005 
1006  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007  pts = sub->pts;
1008  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009  pts -= output_files[ost->file_index]->start_time;
1010  for (i = 0; i < nb; i++) {
1011  unsigned save_num_rects = sub->num_rects;
1012 
1013  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014  if (!check_recording_time(ost))
1015  return;
1016 
1017  sub->pts = pts;
1018  // start_display_time is required to be 0
1019  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020  sub->end_display_time -= sub->start_display_time;
1021  sub->start_display_time = 0;
1022  if (i == 1)
1023  sub->num_rects = 0;
1024 
1025  ost->frames_encoded++;
1026 
1027  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028  subtitle_out_max_size, sub);
1029  if (i == 1)
1030  sub->num_rects = save_num_rects;
1031  if (subtitle_out_size < 0) {
1032  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1033  exit_program(1);
1034  }
1035 
1036  av_init_packet(&pkt);
1037  pkt.data = subtitle_out;
1038  pkt.size = subtitle_out_size;
1039  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042  /* XXX: the pts correction is handled here. Maybe handling
1043  it in the codec would be better */
1044  if (i == 0)
1045  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046  else
1047  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1048  }
1049  pkt.dts = pkt.pts;
1050  output_packet(of, &pkt, ost, 0);
1051  }
1052 }
1053 
1054 static void do_video_out(OutputFile *of,
1055  OutputStream *ost,
1056  AVFrame *next_picture,
1057  double sync_ipts)
1058 {
1059  int ret, format_video_sync;
1060  AVPacket pkt;
1061  AVCodecContext *enc = ost->enc_ctx;
1062  AVCodecParameters *mux_par = ost->st->codecpar;
1063  AVRational frame_rate;
1064  int nb_frames, nb0_frames, i;
1065  double delta, delta0;
1066  double duration = 0;
1067  int frame_size = 0;
1068  InputStream *ist = NULL;
1069  AVFilterContext *filter = ost->filter->filter;
1070 
1071  if (ost->source_index >= 0)
1072  ist = input_streams[ost->source_index];
1073 
1074  frame_rate = av_buffersink_get_frame_rate(filter);
1075  if (frame_rate.num > 0 && frame_rate.den > 0)
1076  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 
1078  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 
1081  if (!ost->filters_script &&
1082  !ost->filters &&
1083  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1084  next_picture &&
1085  ist &&
1086  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1087  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1088  }
1089 
1090  if (!next_picture) {
1091  //end, flushing
1092  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1093  ost->last_nb0_frames[1],
1094  ost->last_nb0_frames[2]);
1095  } else {
1096  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1097  delta = delta0 + duration;
1098 
1099  /* by default, we output a single frame */
1100  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1101  nb_frames = 1;
1102 
1103  format_video_sync = video_sync_method;
1104  if (format_video_sync == VSYNC_AUTO) {
1105  if(!strcmp(of->ctx->oformat->name, "avi")) {
1106  format_video_sync = VSYNC_VFR;
1107  } else
1108  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1109  if ( ist
1110  && format_video_sync == VSYNC_CFR
1111  && input_files[ist->file_index]->ctx->nb_streams == 1
1112  && input_files[ist->file_index]->input_ts_offset == 0) {
1113  format_video_sync = VSYNC_VSCFR;
1114  }
1115  if (format_video_sync == VSYNC_CFR && copy_ts) {
1116  format_video_sync = VSYNC_VSCFR;
1117  }
1118  }
1119  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1120 
1121  if (delta0 < 0 &&
1122  delta > 0 &&
1123  format_video_sync != VSYNC_PASSTHROUGH &&
1124  format_video_sync != VSYNC_DROP) {
1125  if (delta0 < -0.6) {
1126  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1127  } else
1128  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1129  sync_ipts = ost->sync_opts;
1130  duration += delta0;
1131  delta0 = 0;
1132  }
1133 
1134  switch (format_video_sync) {
1135  case VSYNC_VSCFR:
1136  if (ost->frame_number == 0 && delta0 >= 0.5) {
1137  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1138  delta = duration;
1139  delta0 = 0;
1140  ost->sync_opts = lrint(sync_ipts);
1141  }
1142  case VSYNC_CFR:
1143  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1144  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1145  nb_frames = 0;
1146  } else if (delta < -1.1)
1147  nb_frames = 0;
1148  else if (delta > 1.1) {
1149  nb_frames = lrintf(delta);
1150  if (delta0 > 1.1)
1151  nb0_frames = lrintf(delta0 - 0.6);
1152  }
1153  break;
1154  case VSYNC_VFR:
1155  if (delta <= -0.6)
1156  nb_frames = 0;
1157  else if (delta > 0.6)
1158  ost->sync_opts = lrint(sync_ipts);
1159  break;
1160  case VSYNC_DROP:
1161  case VSYNC_PASSTHROUGH:
1162  ost->sync_opts = lrint(sync_ipts);
1163  break;
1164  default:
1165  av_assert0(0);
1166  }
1167  }
1168 
1169  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1170  nb0_frames = FFMIN(nb0_frames, nb_frames);
1171 
1172  memmove(ost->last_nb0_frames + 1,
1173  ost->last_nb0_frames,
1174  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1175  ost->last_nb0_frames[0] = nb0_frames;
1176 
1177  if (nb0_frames == 0 && ost->last_dropped) {
1178  nb_frames_drop++;
1180  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1181  ost->frame_number, ost->st->index, ost->last_frame->pts);
1182  }
1183  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1184  if (nb_frames > dts_error_threshold * 30) {
1185  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1186  nb_frames_drop++;
1187  return;
1188  }
1189  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1190  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1191  if (nb_frames_dup > dup_warning) {
1192  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1193  dup_warning *= 10;
1194  }
1195  }
1196  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1197 
1198  /* duplicates frame if needed */
1199  for (i = 0; i < nb_frames; i++) {
1200  AVFrame *in_picture;
1201  int forced_keyframe = 0;
1202  double pts_time;
1203  av_init_packet(&pkt);
1204  pkt.data = NULL;
1205  pkt.size = 0;
1206 
1207  if (i < nb0_frames && ost->last_frame) {
1208  in_picture = ost->last_frame;
1209  } else
1210  in_picture = next_picture;
1211 
1212  if (!in_picture)
1213  return;
1214 
1215  in_picture->pts = ost->sync_opts;
1216 
1217  if (!check_recording_time(ost))
1218  return;
1219 
1221  ost->top_field_first >= 0)
1222  in_picture->top_field_first = !!ost->top_field_first;
1223 
1224  if (in_picture->interlaced_frame) {
1225  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1226  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1227  else
1228  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1229  } else
1230  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1231 
1232  in_picture->quality = enc->global_quality;
1233  in_picture->pict_type = 0;
1234 
1235  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1236  in_picture->pts != AV_NOPTS_VALUE)
1237  ost->forced_kf_ref_pts = in_picture->pts;
1238 
1239  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1240  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1241  if (ost->forced_kf_index < ost->forced_kf_count &&
1242  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1243  ost->forced_kf_index++;
1244  forced_keyframe = 1;
1245  } else if (ost->forced_keyframes_pexpr) {
1246  double res;
1247  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1248  res = av_expr_eval(ost->forced_keyframes_pexpr,
1249  ost->forced_keyframes_expr_const_values, NULL);
1250  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1251  ost->forced_keyframes_expr_const_values[FKF_N],
1252  ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
1253  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
1254  ost->forced_keyframes_expr_const_values[FKF_T],
1255  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
1256  res);
1257  if (res) {
1258  forced_keyframe = 1;
1259  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
1260  ost->forced_keyframes_expr_const_values[FKF_N];
1261  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
1262  ost->forced_keyframes_expr_const_values[FKF_T];
1263  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
1264  }
1265 
1266  ost->forced_keyframes_expr_const_values[FKF_N] += 1;
1267  } else if ( ost->forced_keyframes
1268  && !strncmp(ost->forced_keyframes, "source", 6)
1269  && in_picture->key_frame==1) {
1270  forced_keyframe = 1;
1271  }
1272 
1273  if (forced_keyframe) {
1274  in_picture->pict_type = AV_PICTURE_TYPE_I;
1275  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1276  }
1277 
1279  if (debug_ts) {
1280  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1281  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1282  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1283  enc->time_base.num, enc->time_base.den);
1284  }
1285 
1286  ost->frames_encoded++;
1287 
1288  ret = avcodec_send_frame(enc, in_picture);
1289  if (ret < 0)
1290  goto error;
1291  // Make sure Closed Captions will not be duplicated
1293 
1294  while (1) {
1295  ret = avcodec_receive_packet(enc, &pkt);
1296  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1297  if (ret == AVERROR(EAGAIN))
1298  break;
1299  if (ret < 0)
1300  goto error;
1301 
1302  if (debug_ts) {
1303  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1304  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1307  }
1308 
1310  pkt.pts = ost->sync_opts;
1311 
1312  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1313 
1314  if (debug_ts) {
1315  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1316  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1317  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1318  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1319  }
1320 
1321  frame_size = pkt.size;
1322  output_packet(of, &pkt, ost, 0);
1323 
1324  /* if two pass, output log */
1325  if (ost->logfile && enc->stats_out) {
1326  fprintf(ost->logfile, "%s", enc->stats_out);
1327  }
1328  }
1329  ost->sync_opts++;
1330  /*
1331  * For video, number of frames in == number of packets out.
1332  * But there may be reordering, so we can't throw away frames on encoder
1333  * flush, we need to limit them here, before they go into encoder.
1334  */
1335  ost->frame_number++;
1336 
1337  if (vstats_filename && frame_size)
1339  }
1340 
1341  if (!ost->last_frame)
1342  ost->last_frame = av_frame_alloc();
1343  av_frame_unref(ost->last_frame);
1344  if (next_picture && ost->last_frame)
1345  av_frame_ref(ost->last_frame, next_picture);
1346  else
1347  av_frame_free(&ost->last_frame);
1348 
1349  return;
1350 error:
1351  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1352  exit_program(1);
1353 }
1354 
1355 static double psnr(double d)
1356 {
1357  return -10.0 * log10(d);
1358 }
1359 
/* do_video_stats — append one line of per-frame statistics for a video
 * stream to the file named by -vstats/-vstats_file.
 * NOTE(review): the signature line (original line 1360) was lost in
 * extraction; judging from the body it takes an OutputStream *ost and an
 * int frame_size — confirm against upstream ffmpeg.c. */
1361 {
1362  AVCodecContext *enc;
1363  int frame_number;
1364  double ti1, bitrate, avg_bitrate;
1365 
1366  /* this is executed just the first time do_video_stats is called */
1367  if (!vstats_file) {
1368  vstats_file = fopen(vstats_filename, "w");
1369  if (!vstats_file) {
1370  perror("fopen");
1371  exit_program(1);
1372  }
1373  }
1374 
1375  enc = ost->enc_ctx;
1376  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1377  frame_number = ost->st->nb_frames;
/* vstats_version 2 adds the output-file and stream indices to each line. */
1378  if (vstats_version <= 1) {
1379  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1380  ost->quality / (float)FF_QP2LAMBDA);
1381  } else {
1382  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1383  ost->quality / (float)FF_QP2LAMBDA);
1384  }
1385 
/* error[0] is the luma SSE accumulated by the encoder when
 * AV_CODEC_FLAG_PSNR is set; normalize by pixel count and 255^2. */
1386  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1387  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1388 
1389  fprintf(vstats_file,"f_size= %6d ", frame_size);
1390  /* compute pts value */
1391  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
/* clamp to avoid division by ~0 in the average-bitrate computation below */
1392  if (ti1 < 0.01)
1393  ti1 = 0.01;
1394 
1395  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1396  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1397  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1398  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1399  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1400  }
1401 }
1402 
1403 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1404 
/* finish_output_stream — mark an output stream as fully finished (both
 * encoder and muxer side); with -shortest, sibling streams of the same
 * output file are affected as well.
 * NOTE(review): the signature line (original 1405) and the body of the
 * for loop (original 1414) were lost in extraction — the loop as shown
 * is empty; confirm against upstream before relying on this text. */
1406 {
1407  OutputFile *of = output_files[ost->file_index];
1408  int i;
1409 
1410  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1411 
1412  if (of->shortest) {
1413  for (i = 0; i < of->ctx->nb_streams; i++)
1415  }
1416 }
1417 
1418 /**
1419  * Get and encode new output from any of the filtergraphs, without causing
1420  * activity.
1421  *
1422  * @return 0 for success, <0 for severe errors
1423  */
/* Pull every frame currently available from all buffer sinks and feed it
 * to the corresponding encoder (do_video_out / do_audio_out). Lazily
 * initializes output streams and their filtered_frame buffer on first use.
 * NOTE(review): extraction dropped several lines here (original 1431,
 * 1433, 1459, 1462, 1465-1466, 1476, 1483, 1490, 1510 — declarations of
 * ost/filter, the buffersink flag argument, and some av_log/rescale
 * continuations); statements below may therefore look truncated. */
1424 static int reap_filters(int flush)
1425 {
1426  AVFrame *filtered_frame = NULL;
1427  int i;
1428 
1429  /* Reap all buffers present in the buffer sinks */
1430  for (i = 0; i < nb_output_streams; i++) {
1432  OutputFile *of = output_files[ost->file_index];
1434  AVCodecContext *enc = ost->enc_ctx;
1435  int ret = 0;
1436 
/* nothing to reap until the stream's filtergraph is configured */
1437  if (!ost->filter || !ost->filter->graph->graph)
1438  continue;
1439  filter = ost->filter->filter;
1440 
1441  if (!ost->initialized) {
1442  char error[1024] = "";
1443  ret = init_output_stream(ost, error, sizeof(error));
1444  if (ret < 0) {
1445  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1446  ost->file_index, ost->index, error);
1447  exit_program(1);
1448  }
1449  }
1450 
/* one reusable AVFrame per stream; allocated on first use */
1451  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1452  return AVERROR(ENOMEM);
1453  }
1454  filtered_frame = ost->filtered_frame;
1455 
1456  while (1) {
1457  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1458  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1460  if (ret < 0) {
1461  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1463  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1464  } else if (flush && ret == AVERROR_EOF) {
1467  }
1468  break;
1469  }
/* stream already finished: discard whatever the graph still emits */
1470  if (ost->finished) {
1471  av_frame_unref(filtered_frame);
1472  continue;
1473  }
1474  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1475  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1477  AVRational tb = enc->time_base;
/* widen the timebase denominator (up to 16 extra bits) so float_pts
 * keeps sub-tick precision after the rescale */
1478  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1479 
1480  tb.den <<= extra_bits;
1481  float_pts =
1482  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1484  float_pts /= 1 << extra_bits;
1485  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1486  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1487 
1488  filtered_frame->pts =
1489  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1491  }
1492 
1493  switch (av_buffersink_get_type(filter)) {
1494  case AVMEDIA_TYPE_VIDEO:
1495  if (!ost->frame_aspect_ratio.num)
1496  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1497 
1498  if (debug_ts) {
1499  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1500  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1501  float_pts,
1502  enc->time_base.num, enc->time_base.den);
1503  }
1504 
1505  do_video_out(of, ost, filtered_frame, float_pts);
1506  break;
1507  case AVMEDIA_TYPE_AUDIO:
1508  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1509  enc->channels != filtered_frame->channels) {
1511  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1512  break;
1513  }
1514  do_audio_out(of, ost, filtered_frame);
1515  break;
1516  default:
1517  // TODO support subtitle filters
1518  av_assert0(0);
1519  }
1520 
1521  av_frame_unref(filtered_frame);
1522  }
1523  }
1524 
1525  return 0;
1526 }
1527 
/* Print the end-of-run summary: per-type byte totals, muxing overhead,
 * and (at verbose level) per-input/per-output stream packet statistics.
 * NOTE(review): extraction dropped original lines 1538, 1548 and 1610
 * (the OutputStream *ost declarations and part of the PASS1/PASS2 flag
 * test), so two loop bodies below look like they reference an undeclared
 * 'ost'; confirm against upstream. */
1528 static void print_final_stats(int64_t total_size)
1529 {
1530  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1531  uint64_t subtitle_size = 0;
1532  uint64_t data_size = 0;
/* percent stays negative when overhead cannot be computed ("unknown") */
1533  float percent = -1.0;
1534  int i, j;
1535  int pass1_used = 1;
1536 
1537  for (i = 0; i < nb_output_streams; i++) {
1539  switch (ost->enc_ctx->codec_type) {
1540  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1541  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1542  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1543  default: other_size += ost->data_size; break;
1544  }
1545  extra_size += ost->enc_ctx->extradata_size;
1546  data_size += ost->data_size;
1547  if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1549  pass1_used = 0;
1550  }
1551 
/* muxing overhead: container bytes beyond the raw coded payload */
1552  if (data_size && total_size>0 && total_size >= data_size)
1553  percent = 100.0 * (total_size - data_size) / data_size;
1554 
1555  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1556  video_size / 1024.0,
1557  audio_size / 1024.0,
1558  subtitle_size / 1024.0,
1559  other_size / 1024.0,
1560  extra_size / 1024.0);
1561  if (percent >= 0.0)
1562  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1563  else
1564  av_log(NULL, AV_LOG_INFO, "unknown");
1565  av_log(NULL, AV_LOG_INFO, "\n");
1566 
1567  /* print verbose per-stream stats */
1568  for (i = 0; i < nb_input_files; i++) {
1569  InputFile *f = input_files[i];
1570  uint64_t total_packets = 0, total_size = 0;
1571 
1572  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1573  i, f->ctx->url);
1574 
1575  for (j = 0; j < f->nb_streams; j++) {
1576  InputStream *ist = input_streams[f->ist_index + j];
1577  enum AVMediaType type = ist->dec_ctx->codec_type;
1578 
1579  total_size += ist->data_size;
1580  total_packets += ist->nb_packets;
1581 
1582  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1583  i, j, media_type_string(type));
1584  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1585  ist->nb_packets, ist->data_size);
1586 
1587  if (ist->decoding_needed) {
1588  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1589  ist->frames_decoded);
1590  if (type == AVMEDIA_TYPE_AUDIO)
1591  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1592  av_log(NULL, AV_LOG_VERBOSE, "; ");
1593  }
1594 
1595  av_log(NULL, AV_LOG_VERBOSE, "\n");
1596  }
1597 
1598  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1599  total_packets, total_size);
1600  }
1601 
1602  for (i = 0; i < nb_output_files; i++) {
1603  OutputFile *of = output_files[i];
1604  uint64_t total_packets = 0, total_size = 0;
1605 
1606  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1607  i, of->ctx->url);
1608 
1609  for (j = 0; j < of->ctx->nb_streams; j++) {
1611  enum AVMediaType type = ost->enc_ctx->codec_type;
1612 
1613  total_size += ost->data_size;
1614  total_packets += ost->packets_written;
1615 
1616  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1617  i, j, media_type_string(type));
1618  if (ost->encoding_needed) {
1619  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1620  ost->frames_encoded);
1621  if (type == AVMEDIA_TYPE_AUDIO)
1622  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1623  av_log(NULL, AV_LOG_VERBOSE, "; ");
1624  }
1625 
1626  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1627  ost->packets_written, ost->data_size);
1628 
1629  av_log(NULL, AV_LOG_VERBOSE, "\n");
1630  }
1631 
1632  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1633  total_packets, total_size);
1634  }
/* nothing was produced at all: warn, and hint at -ss/-t/-frames unless
 * this was a deliberate first pass of two-pass encoding */
1635  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1636  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1637  if (pass1_used) {
1638  av_log(NULL, AV_LOG_WARNING, "\n");
1639  } else {
1640  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1641  }
1642  }
1643 }
1644 
/* Emit the periodic (or final) progress line: frame count, fps, q, size,
 * time, bitrate, dup/drop counters and speed — to stderr/log and, when
 * -progress is active, as key=value pairs to progress_avio.
 * NOTE(review): extraction dropped original lines 1686, 1754-1755, 1802,
 * 1824, 1831 and 1835 (av_bprint_init of 'buf', the pts update from the
 * stream's current dts, the dup/drop guard, buffer finalize and
 * avio_flush); some statements below are visibly truncated. */
1645 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1646 {
1647  AVBPrint buf, buf_script;
1648  OutputStream *ost;
1649  AVFormatContext *oc;
1650  int64_t total_size;
1651  AVCodecContext *enc;
1652  int frame_number, vid, i;
1653  double bitrate;
1654  double speed;
1655  int64_t pts = INT64_MIN + 1;
/* static: state persists across calls for rate-limiting and qp stats */
1656  static int64_t last_time = -1;
1657  static int qp_histogram[52];
1658  int hours, mins, secs, us;
1659  const char *hours_sign;
1660  int ret;
1661  float t;
1662 
1663  if (!print_stats && !is_last_report && !progress_avio)
1664  return;
1665 
/* throttle intermediate reports to at most one per 500ms */
1666  if (!is_last_report) {
1667  if (last_time == -1) {
1668  last_time = cur_time;
1669  return;
1670  }
1671  if ((cur_time - last_time) < 500000)
1672  return;
1673  last_time = cur_time;
1674  }
1675 
1676  t = (cur_time-timer_start) / 1000000.0;
1677 
1678 
1679  oc = output_files[0]->ctx;
1680 
1681  total_size = avio_size(oc->pb);
1682  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1683  total_size = avio_tell(oc->pb);
1684 
1685  vid = 0;
1687  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1688  for (i = 0; i < nb_output_streams; i++) {
1689  float q = -1;
1690  ost = output_streams[i];
1691  enc = ost->enc_ctx;
1692  if (!ost->stream_copy)
1693  q = ost->quality / (float) FF_QP2LAMBDA;
1694 
/* only the first video stream drives frame/fps display; later video
 * streams just contribute their q value */
1695  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1696  av_bprintf(&buf, "q=%2.1f ", q);
1697  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1698  ost->file_index, ost->index, q);
1699  }
1700  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1701  float fps;
1702 
1703  frame_number = ost->frame_number;
1704  fps = t > 1 ? frame_number / t : 0;
1705  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1706  frame_number, fps < 9.95, fps, q);
1707  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1708  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1709  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1710  ost->file_index, ost->index, q);
1711  if (is_last_report)
1712  av_bprintf(&buf, "L");
1713  if (qp_hist) {
1714  int j;
1715  int qp = lrintf(q);
1716  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1717  qp_histogram[qp]++;
1718  for (j = 0; j < 32; j++)
1719  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1720  }
1721 
1722  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1723  int j;
1724  double error, error_sum = 0;
1725  double scale, scale_sum = 0;
1726  double p;
1727  char type[3] = { 'Y','U','V' };
1728  av_bprintf(&buf, "PSNR=");
1729  for (j = 0; j < 3; j++) {
1730  if (is_last_report) {
1731  error = enc->error[j];
1732  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1733  } else {
1734  error = ost->error[j];
1735  scale = enc->width * enc->height * 255.0 * 255.0;
1736  }
/* chroma planes are subsampled to a quarter of the luma area */
1737  if (j)
1738  scale /= 4;
1739  error_sum += error;
1740  scale_sum += scale;
1741  p = psnr(error / scale);
1742  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1743  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1744  ost->file_index, ost->index, type[j] | 32, p);
1745  }
1746  p = psnr(error_sum / scale_sum);
1747  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1748  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1749  ost->file_index, ost->index, p);
1750  }
1751  vid = 1;
1752  }
1753  /* compute min output value */
1756  ost->st->time_base, AV_TIME_BASE_Q));
1757  if (is_last_report)
1758  nb_frames_drop += ost->last_dropped;
1759  }
1760 
/* split |pts| (microseconds) into H:MM:SS.xx components */
1761  secs = FFABS(pts) / AV_TIME_BASE;
1762  us = FFABS(pts) % AV_TIME_BASE;
1763  mins = secs / 60;
1764  secs %= 60;
1765  hours = mins / 60;
1766  mins %= 60;
1767  hours_sign = (pts < 0) ? "-" : "";
1768 
1769  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1770  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1771 
1772  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1773  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1774  if (pts == AV_NOPTS_VALUE) {
1775  av_bprintf(&buf, "N/A ");
1776  } else {
1777  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1778  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1779  }
1780 
1781  if (bitrate < 0) {
1782  av_bprintf(&buf, "bitrate=N/A");
1783  av_bprintf(&buf_script, "bitrate=N/A\n");
1784  }else{
1785  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1786  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1787  }
1788 
1789  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1790  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1791  if (pts == AV_NOPTS_VALUE) {
1792  av_bprintf(&buf_script, "out_time_us=N/A\n");
1793  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1794  av_bprintf(&buf_script, "out_time=N/A\n");
1795  } else {
1796  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1797  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1798  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1799  hours_sign, hours, mins, secs, us);
1800  }
1801 
1803  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1804  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1805  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1806 
1807  if (speed < 0) {
1808  av_bprintf(&buf, " speed=N/A");
1809  av_bprintf(&buf_script, "speed=N/A\n");
1810  } else {
1811  av_bprintf(&buf, " speed=%4.3gx", speed);
1812  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1813  }
1814 
/* '\r' keeps intermediate reports on one updating console line;
 * the final report ends with '\n' */
1815  if (print_stats || is_last_report) {
1816  const char end = is_last_report ? '\n' : '\r';
1817  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1818  fprintf(stderr, "%s %c", buf.str, end);
1819  } else
1820  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1821 
1822  fflush(stderr);
1823  }
1825 
1826  if (progress_avio) {
1827  av_bprintf(&buf_script, "progress=%s\n",
1828  is_last_report ? "end" : "continue");
1829  avio_write(progress_avio, buf_script.str,
1830  FFMIN(buf_script.len, buf_script.size - 1));
1832  av_bprint_finalize(&buf_script, NULL);
1833  if (is_last_report) {
1834  if ((ret = avio_closep(&progress_avio)) < 0)
1836  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1837  }
1838  }
1839 
1840  if (is_last_report)
1841  print_final_stats(total_size);
1842 }
1843 
/* ifilter_parameters_from_codecpar — seed an InputFilter's media
 * parameters from the stream's AVCodecParameters when no decoded frame
 * was ever produced (fake format taken from libavformat).
 * NOTE(review): the signature line (original 1844) was lost in
 * extraction; the body reads an InputFilter *ifilter and an
 * AVCodecParameters *par — confirm against upstream. */
1845 {
1846  // We never got any input. Set a fake format, which will
1847  // come from libavformat.
1848  ifilter->format = par->format;
/* audio-side parameters */
1849  ifilter->sample_rate = par->sample_rate;
1850  ifilter->channels = par->channels;
1851  ifilter->channel_layout = par->channel_layout;
/* video-side parameters */
1852  ifilter->width = par->width;
1853  ifilter->height = par->height;
1854  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1855 }
1856 
/* Drain every encoder at end of input: send a NULL frame to each encoder
 * that needs it and mux all remaining packets. Streams that never got any
 * data are force-initialized first so headers can still be written.
 * NOTE(review): extraction dropped original lines 1862, 1875, 1887, 1896,
 * 1910 and 1933 (the 'ost' declaration, an av_log line, two statements in
 * the never-initialized path, a continue-guard condition, and an
 * update_benchmark(NULL) call); some statements below look truncated. */
1857 static void flush_encoders(void)
1858 {
1859  int i, ret;
1860 
1861  for (i = 0; i < nb_output_streams; i++) {
1863  AVCodecContext *enc = ost->enc_ctx;
1864  OutputFile *of = output_files[ost->file_index];
1865 
1866  if (!ost->encoding_needed)
1867  continue;
1868 
1869  // Try to enable encoding with no input frames.
1870  // Maybe we should just let encoding fail instead.
1871  if (!ost->initialized) {
1872  FilterGraph *fg = ost->filter->graph;
1873  char error[1024] = "";
1874 
1876  "Finishing stream %d:%d without any data written to it.\n",
1877  ost->file_index, ost->st->index);
1878 
/* no graph was ever configured: fall back to codecpar-derived
 * parameters so the graph can be built anyway */
1879  if (ost->filter && !fg->graph) {
1880  int x;
1881  for (x = 0; x < fg->nb_inputs; x++) {
1882  InputFilter *ifilter = fg->inputs[x];
1883  if (ifilter->format < 0)
1884  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1885  }
1886 
1888  continue;
1889 
1890  ret = configure_filtergraph(fg);
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1893  exit_program(1);
1894  }
1895 
1897  }
1898 
/* PCM-style audio encoders (frame_size <= 1) have nothing buffered */
1907  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1908  continue;
1909 
1911  continue;
1912 
1913  for (;;) {
1914  const char *desc = NULL;
1915  AVPacket pkt;
1916  int pkt_size;
1917 
1918  switch (enc->codec_type) {
1919  case AVMEDIA_TYPE_AUDIO:
1920  desc = "audio";
1921  break;
1922  case AVMEDIA_TYPE_VIDEO:
1923  desc = "video";
1924  break;
1925  default:
1926  av_assert0(0);
1927  }
1928 
1929  av_init_packet(&pkt);
1930  pkt.data = NULL;
1931  pkt.size = 0;
1932 
1934 
/* keep feeding the flush (NULL) frame until the encoder yields a
 * packet or signals EOF */
1935  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1936  ret = avcodec_send_frame(enc, NULL);
1937  if (ret < 0) {
1938  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1939  desc,
1940  av_err2str(ret));
1941  exit_program(1);
1942  }
1943  }
1944 
1945  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1946  if (ret < 0 && ret != AVERROR_EOF) {
1947  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1948  desc,
1949  av_err2str(ret));
1950  exit_program(1);
1951  }
1952  if (ost->logfile && enc->stats_out) {
1953  fprintf(ost->logfile, "%s", enc->stats_out);
1954  }
/* EOF: send one final (EOF-marked) packet to the muxer and stop */
1955  if (ret == AVERROR_EOF) {
1956  output_packet(of, &pkt, ost, 1);
1957  break;
1958  }
1959  if (ost->finished & MUXER_FINISHED) {
1960  av_packet_unref(&pkt);
1961  continue;
1962  }
1963  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1964  pkt_size = pkt.size;
1965  output_packet(of, &pkt, ost, 0);
1966  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
1967  do_video_stats(ost, pkt_size);
1968  }
1969  }
1970  }
1971 }
1972 
1973 /*
1974  * Check whether a packet from ist should be written into ost at this time
1975  */
/* check_output_constraints — return 1 when a packet read from this input
 * stream should currently be written to this output stream, 0 otherwise
 * (wrong source, stream finished, or before the output -ss start time).
 * NOTE(review): the signature line (original 1976) was lost in
 * extraction; the body reads an InputStream *ist and an OutputStream
 * *ost — confirm against upstream. */
1977 {
1978  OutputFile *of = output_files[ost->file_index];
/* global index of ist across all input files, comparable to source_index */
1979  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1980 
1981  if (ost->source_index != ist_index)
1982  return 0;
1983 
1984  if (ost->finished)
1985  return 0;
1986 
1987  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1988  return 0;
1989 
1990  return 1;
1991 }
1992 
/* do_streamcopy — pass a demuxed packet straight through to the muxer
 * (-c copy): rescale timestamps into the output timebase, honor start
 * time / recording time limits, and reference (not copy) the payload.
 * NOTE(review): the signature line (original 1993) and lines 2025, 2034
 * and 2054-2055 were lost in extraction (the recording-time stream-close
 * calls and the guard/declaration opening the audio duration block), so
 * two if-bodies and one brace below look truncated; confirm upstream. */
1994 {
1995  OutputFile *of = output_files[ost->file_index];
1996  InputFile *f = input_files [ist->file_index];
1997  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1998  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1999  AVPacket opkt = { 0 };
2000 
2001  av_init_packet(&opkt);
2002 
2003  // EOF: flush output bitstream filters.
2004  if (!pkt) {
2005  output_packet(of, &opkt, ost, 1);
2006  return;
2007  }
2008 
/* by default, wait for the first keyframe before starting the copy */
2009  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2010  !ost->copy_initial_nonkeyframes)
2011  return;
2012 
2013  if (!ost->frame_number && !ost->copy_prior_start) {
2014  int64_t comp_start = start_time;
2015  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2016  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2017  if (pkt->pts == AV_NOPTS_VALUE ?
2018  ist->pts < comp_start :
2019  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2020  return;
2021  }
2022 
2023  if (of->recording_time != INT64_MAX &&
2024  ist->pts >= of->recording_time + start_time) {
2026  return;
2027  }
2028 
2029  if (f->recording_time != INT64_MAX) {
2030  start_time = f->ctx->start_time;
2031  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2032  start_time += f->start_time;
2033  if (ist->pts >= f->recording_time + start_time) {
2035  return;
2036  }
2037  }
2038 
2039  /* force the input stream PTS */
2040  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2041  ost->sync_opts++;
2042 
2043  if (pkt->pts != AV_NOPTS_VALUE)
2044  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2045  else
2046  opkt.pts = AV_NOPTS_VALUE;
2047 
2048  if (pkt->dts == AV_NOPTS_VALUE)
2049  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2050  else
2051  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2052  opkt.dts -= ost_tb_start_time;
2053 
/* audio: derive pts/dts via av_rescale_delta to keep sample accuracy */
2056  if(!duration)
2057  duration = ist->dec_ctx->frame_size;
2058  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
2059  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
2060  ost->mux_timebase) - ost_tb_start_time;
2061  }
2062 
2063  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2064 
2065  opkt.flags = pkt->flags;
2066 
/* reference the input payload rather than copying it */
2067  if (pkt->buf) {
2068  opkt.buf = av_buffer_ref(pkt->buf);
2069  if (!opkt.buf)
2070  exit_program(1);
2071  }
2072  opkt.data = pkt->data;
2073  opkt.size = pkt->size;
2074 
2075  av_copy_packet_side_data(&opkt, pkt);
2076 
2077  output_packet(of, &opkt, ost, 0);
2078 }
2079 
/* guess_input_channel_layout — if the decoder reports no channel layout,
 * pick a default one from the channel count (up to guess_layout_max
 * channels) and warn. Returns 1 on success/no-op, 0 when no layout could
 * be guessed.
 * NOTE(review): the signature line (original 2080) and the line that
 * actually assigns the guessed layout (original 2089, presumably
 * av_get_default_channel_layout) were lost in extraction. */
2081 {
2082  AVCodecContext *dec = ist->dec_ctx;
2083 
2084  if (!dec->channel_layout) {
2085  char layout_name[256];
2086 
2087  if (dec->channels > ist->guess_layout_max)
2088  return 0;
2090  if (!dec->channel_layout)
2091  return 0;
2092  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2093  dec->channels, dec->channel_layout);
2094  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2095  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2096  }
2097  return 1;
2098 }
2099 
/* Track decode outcome statistics and enforce -xerror: counts successes
 * vs. failures in decode_error_stat[], aborts on error when exit_on_error
 * is set, and warns (or aborts) on corrupt decoded frames.
 * NOTE(review): extraction dropped original lines 2109-2110 — the inner
 * condition testing the decoded frame for corruption and the start of the
 * av_log call — so the inner if-block below is visibly truncated. */
2100 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2101 {
/* index 0 counts successful decodes, index 1 counts failed ones */
2102  if (*got_output || ret<0)
2103  decode_error_stat[ret<0] ++;
2104 
2105  if (ret < 0 && exit_on_error)
2106  exit_program(1);
2107 
2108  if (*got_output && ist) {
2111  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2112  if (exit_on_error)
2113  exit_program(1);
2114  }
2115  }
2116 }
2117 
2118 // Filters can be configured only if the formats of all inputs are known.
2120 {
2121  int i;
2122  for (i = 0; i < fg->nb_inputs; i++) {
2123  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2124  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2125  return 0;
2126  }
2127  return 1;
2128 }
2129 
/* ifilter_send_frame — push one decoded frame into an input filter.
 * Detects parameter changes (format/size/channels/hw context), reinits
 * the filtergraph when needed, and queues frames while the graph cannot
 * be configured yet.
 * NOTE(review): the signature line (original 2130) and lines 2158, 2167,
 * 2170 and 2197 were lost in extraction (including the frame clone into
 * 'tmp' and the av_buffersrc_add_frame call whose result feeds the final
 * error check); several statements below are visibly truncated. */
2131 {
2132  FilterGraph *fg = ifilter->graph;
2133  int need_reinit, ret, i;
2134 
2135  /* determine if the parameters for this input changed */
2136  need_reinit = ifilter->format != frame->format;
2137 
2138  switch (ifilter->ist->st->codecpar->codec_type) {
2139  case AVMEDIA_TYPE_AUDIO:
2140  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
2141  ifilter->channels != frame->channels ||
2142  ifilter->channel_layout != frame->channel_layout;
2143  break;
2144  case AVMEDIA_TYPE_VIDEO:
2145  need_reinit |= ifilter->width != frame->width ||
2146  ifilter->height != frame->height;
2147  break;
2148  }
2149 
/* -reinit_filter 0: keep the existing graph despite parameter changes */
2150  if (!ifilter->ist->reinit_filters && fg->graph)
2151  need_reinit = 0;
2152 
/* a change in hw frames context always forces a reinit */
2153  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
2154  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2155  need_reinit = 1;
2156 
2157  if (need_reinit) {
2159  if (ret < 0)
2160  return ret;
2161  }
2162 
2163  /* (re)init the graph if possible, otherwise buffer the frame and return */
2164  if (need_reinit || !fg->graph) {
2165  for (i = 0; i < fg->nb_inputs; i++) {
2166  if (!ifilter_has_all_input_formats(fg)) {
2168  if (!tmp)
2169  return AVERROR(ENOMEM);
2171 
/* grow the FIFO when full before queueing the cloned frame */
2172  if (!av_fifo_space(ifilter->frame_queue)) {
2173  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
2174  if (ret < 0) {
2175  av_frame_free(&tmp);
2176  return ret;
2177  }
2178  }
2179  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
2180  return 0;
2181  }
2182  }
2183 
/* drain the old graph before reconfiguring it */
2184  ret = reap_filters(1);
2185  if (ret < 0 && ret != AVERROR_EOF) {
2186  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2187  return ret;
2188  }
2189 
2190  ret = configure_filtergraph(fg);
2191  if (ret < 0) {
2192  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
2193  return ret;
2194  }
2195  }
2196 
2198  if (ret < 0) {
2199  if (ret != AVERROR_EOF)
2200  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2201  return ret;
2202  }
2203 
2204  return 0;
2205 }
2206 
/* Signal EOF on an input filter. If the filter exists, the close call is
 * forwarded to it; otherwise (graph never configured) fall back to
 * codecpar parameters, failing with AVERROR_INVALIDDATA when the format
 * still cannot be determined. Returns 0 on success, <0 on error.
 * NOTE(review): original line 2214 — the call whose result is checked
 * right below (presumably the buffersrc close with 'pts') — was lost in
 * extraction; 'ret' appears to be tested before any visible assignment. */
2207 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2208 {
2209  int ret;
2210 
2211  ifilter->eof = 1;
2212 
2213  if (ifilter->filter) {
2215  if (ret < 0)
2216  return ret;
2217  } else {
2218  // the filtergraph was never configured
2219  if (ifilter->format < 0)
2220  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2221  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2222  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2223  return AVERROR_INVALIDDATA;
2224  }
2225  }
2226 
2227  return 0;
2228 }
2229 
2230 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2231 // There is the following difference: if you got a frame, you must call
2232 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2233 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2234 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2235 {
2236  int ret;
2237 
2238  *got_frame = 0;
2239 
2240  if (pkt) {
2241  ret = avcodec_send_packet(avctx, pkt);
2242  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2243  // decoded frames with avcodec_receive_frame() until done.
2244  if (ret < 0 && ret != AVERROR_EOF)
2245  return ret;
2246  }
2247 
2248  ret = avcodec_receive_frame(avctx, frame);
2249  if (ret < 0 && ret != AVERROR(EAGAIN))
2250  return ret;
2251  if (ret >= 0)
2252  *got_frame = 1;
2253 
2254  return 0;
2255 }
2256 
/* Fan a decoded frame out to every filtergraph input fed by this stream.
 * All but the last input receive a new reference (av_frame_ref into
 * ist->filter_frame); the last one consumes `decoded_frame` directly.
 * AVERROR_EOF from a filter input is deliberately ignored.
 * Returns 0 on success or the first fatal negative error. */
2257 static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
2258 {
2259  int i, ret;
2260  AVFrame *f;
2261 
2262  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2263  for (i = 0; i < ist->nb_filters; i++) {
2264  if (i < ist->nb_filters - 1) {
2265  f = ist->filter_frame;
2266  ret = av_frame_ref(f, decoded_frame);
2267  if (ret < 0)
2268  break;
2269  } else
2270  f = decoded_frame;
2271  ret = ifilter_send_frame(ist->filters[i], f);
2272  if (ret == AVERROR_EOF)
2273  ret = 0; /* ignore */
2274  if (ret < 0) {
 /* NOTE(review): the av_log(...) call opening this error message was on
  * original line 2275, elided by this extraction. */
2276  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2277  break;
2278  }
2279  }
2280  return ret;
2281 }
2282 
/* Decode one audio packet (or drain with pkt==NULL), advance the stream's
 * predicted next_pts/next_dts by the decoded duration, reconstruct a pts for
 * the frame when the decoder did not provide one, and forward the frame to
 * the filtergraphs.  *got_output and *decode_failed report what happened.
 * Returns the decoder error (or a send_frame_to_filters error) if any. */
2283 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2284  int *decode_failed)
2285 {
2286  AVFrame *decoded_frame;
2287  AVCodecContext *avctx = ist->dec_ctx;
2288  int ret, err = 0;
2289  AVRational decoded_frame_tb;
2290 
 /* Lazily allocate the per-stream scratch frames on first use. */
2291  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2292  return AVERROR(ENOMEM);
2293  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2294  return AVERROR(ENOMEM);
2295  decoded_frame = ist->decoded_frame;
2296 
 /* NOTE(review): original line 2297 (benchmark bookkeeping before the
  * decode call) was elided by this extraction. */
2298  ret = decode(avctx, decoded_frame, got_output, pkt);
2299  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2300  if (ret < 0)
2301  *decode_failed = 1;
2302 
2303  if (ret >= 0 && avctx->sample_rate <= 0) {
2304  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
 /* NOTE(review): original line 2305 (presumably setting ret to an error
  * code for the invalid sample rate) was elided — confirm upstream. */
2306  }
2307 
2308  if (ret != AVERROR_EOF)
2309  check_decode_result(ist, got_output, ret);
2310 
2311  if (!*got_output || ret < 0)
2312  return ret;
2313 
2314  ist->samples_decoded += decoded_frame->nb_samples;
2315  ist->frames_decoded++;
2316 
2317  /* increment next_dts to use for the case where the input stream does not
2318  have timestamps or there are multiple frames in the packet */
2319  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2320  avctx->sample_rate;
2321  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2322  avctx->sample_rate;
2323 
 /* Choose a pts and its time base: decoder pts > packet pts > stream dts. */
2324  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2325  decoded_frame_tb = ist->st->time_base;
2326  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2327  decoded_frame->pts = pkt->pts;
2328  decoded_frame_tb = ist->st->time_base;
2329  }else {
2330  decoded_frame->pts = ist->dts;
2331  decoded_frame_tb = AV_TIME_BASE_Q;
2332  }
2333  if (decoded_frame->pts != AV_NOPTS_VALUE)
2334  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2335  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2336  (AVRational){1, avctx->sample_rate});
2337  ist->nb_samples = decoded_frame->nb_samples;
2338  err = send_frame_to_filters(ist, decoded_frame);
2339 
 /* NOTE(review): original line 2340 (presumably unreffing the scratch
  * filter_frame) was elided by this extraction. */
2341  av_frame_unref(decoded_frame);
2342  return err < 0 ? err : ret;
2343 }
2344 
/* Decode one video packet (or drain with pkt==NULL/eof), establish a best
 * effort timestamp for the output frame, retrieve hwaccel data if needed,
 * and forward the frame to the filtergraphs.  *duration_pts receives the
 * frame duration in stream time base; *got_output / *decode_failed report
 * the outcome.  Returns the decoder error or a downstream error. */
2345 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2346  int *decode_failed)
2347 {
2348  AVFrame *decoded_frame;
2349  int i, ret = 0, err = 0;
2350  int64_t best_effort_timestamp;
2351  int64_t dts = AV_NOPTS_VALUE;
2352  AVPacket avpkt;
2353 
2354  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2355  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2356  // skip the packet.
2357  if (!eof && pkt && pkt->size == 0)
2358  return 0;
2359 
2360  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2361  return AVERROR(ENOMEM);
2362  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2363  return AVERROR(ENOMEM);
2364  decoded_frame = ist->decoded_frame;
2365  if (ist->dts != AV_NOPTS_VALUE)
2366  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2367  if (pkt) {
2368  avpkt = *pkt;
2369  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2370  }
2371 
2372  // The old code used to set dts on the drain packet, which does not work
2373  // with the new API anymore.
2374  if (eof) {
 /* Buffer the dts so draining can still produce timestamps (used below). */
2375  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2376  if (!new)
2377  return AVERROR(ENOMEM);
2378  ist->dts_buffer = new;
2379  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2380  }
2381 
 /* NOTE(review): original line 2382 (benchmark bookkeeping before the
  * decode call) was elided by this extraction. */
2383  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2384  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2385  if (ret < 0)
2386  *decode_failed = 1;
2387 
2388  // The following line may be required in some cases where there is no parser
2389  // or the parser does not has_b_frames correctly
2390  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2391  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2392  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2393  } else
 /* NOTE(review): the av_log(...) call opening this warning was on
  * original line 2394, elided by this extraction. */
2395  "video_delay is larger in decoder than demuxer %d > %d.\n"
2396  "If you want to help, upload a sample "
2397  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2398  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2399  ist->dec_ctx->has_b_frames,
2400  ist->st->codecpar->video_delay);
2401  }
2402 
2403  if (ret != AVERROR_EOF)
2404  check_decode_result(ist, got_output, ret);
2405 
 /* Diagnostic only: a geometry/format mismatch between the decoder context
  * and the produced frame is logged but not treated as an error. */
2406  if (*got_output && ret >= 0) {
2407  if (ist->dec_ctx->width != decoded_frame->width ||
2408  ist->dec_ctx->height != decoded_frame->height ||
2409  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2410  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2411  decoded_frame->width,
2412  decoded_frame->height,
2413  decoded_frame->format,
2414  ist->dec_ctx->width,
2415  ist->dec_ctx->height,
2416  ist->dec_ctx->pix_fmt);
2417  }
2418  }
2419 
2420  if (!*got_output || ret < 0)
2421  return ret;
2422 
2423  if(ist->top_field_first>=0)
2424  decoded_frame->top_field_first = ist->top_field_first;
2425 
2426  ist->frames_decoded++;
2427 
2428  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2429  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2430  if (err < 0)
2431  goto fail;
2432  }
2433  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2434 
2435  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2436  *duration_pts = decoded_frame->pkt_duration;
2437 
 /* With a forced input framerate (-r before -i), pts are synthesized. */
2438  if (ist->framerate.num)
2439  best_effort_timestamp = ist->cfr_next_pts++;
2440 
 /* While draining, fall back to the dts values queued at EOF above. */
2441  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2442  best_effort_timestamp = ist->dts_buffer[0];
2443 
2444  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2445  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2446  ist->nb_dts_buffer--;
2447  }
2448 
2449  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2450  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2451 
2452  if (ts != AV_NOPTS_VALUE)
2453  ist->next_pts = ist->pts = ts;
2454  }
2455 
2456  if (debug_ts) {
2457  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2458  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2459  ist->st->index, av_ts2str(decoded_frame->pts),
2460  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2461  best_effort_timestamp,
2462  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2463  decoded_frame->key_frame, decoded_frame->pict_type,
2464  ist->st->time_base.num, ist->st->time_base.den);
2465  }
2466 
2467  if (ist->st->sample_aspect_ratio.num)
2468  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2469 
2470  err = send_frame_to_filters(ist, decoded_frame);
2471 
2472 fail:
 /* NOTE(review): original line 2473 (presumably unreffing the scratch
  * filter_frame) was elided by this extraction. */
2474  av_frame_unref(decoded_frame);
2475  return err < 0 ? err : ret;
2476 }
2477 
/* Decode one subtitle packet, optionally fix overlapping subtitle durations
 * (-fix_sub_duration), feed sub2video rendering or queue the subtitle for a
 * not-yet-configured filtergraph, and encode it to all matching subtitle
 * output streams.  Returns the decoder result. */
2478 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2479  int *decode_failed)
2480 {
2481  AVSubtitle subtitle;
2482  int free_sub = 1;
2483  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2484  &subtitle, got_output, pkt);
2485 
2486  check_decode_result(NULL, got_output, ret);
2487 
2488  if (ret < 0 || !*got_output) {
2489  *decode_failed = 1;
 /* An empty packet means drain: flush pending sub2video frames. */
2490  if (!pkt->size)
2491  sub2video_flush(ist);
2492  return ret;
2493  }
2494 
 /* Clip the previous subtitle's display time so it ends when this one
  * starts; the previous subtitle is what actually gets emitted below
  * (swapped into `subtitle`), delaying output by one subtitle. */
2495  if (ist->fix_sub_duration) {
2496  int end = 1;
2497  if (ist->prev_sub.got_output) {
2498  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2499  1000, AV_TIME_BASE);
2500  if (end < ist->prev_sub.subtitle.end_display_time) {
2501  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2502  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): original lines 2503/2505 (remaining log arguments
  * and the assignment shortening end_display_time) were elided by
  * this extraction. */
2504  end <= 0 ? ", dropping it" : "");
2506  }
2507  }
2508  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2509  FFSWAP(int, ret, ist->prev_sub.ret);
2510  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2511  if (end <= 0)
2512  goto out;
2513  }
2514 
2515  if (!*got_output)
2516  return ret;
2517 
2518  if (ist->sub2video.frame) {
2519  sub2video_update(ist, &subtitle);
2520  } else if (ist->nb_filters) {
 /* Filtergraph not configured yet: queue the subtitle for later. */
2521  if (!ist->sub2video.sub_queue)
2522  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2523  if (!ist->sub2video.sub_queue)
2524  exit_program(1);
2525  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): original line 2526 (growing the fifo, setting `ret`)
  * was elided by this extraction. */
2527  if (ret < 0)
2528  exit_program(1);
2529  }
2530  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2531  free_sub = 0;
2532  }
2533 
2534  if (!subtitle.num_rects)
2535  goto out;
2536 
2537  ist->frames_decoded++;
2538 
2539  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): original line 2540 (declaring `ost` from
  * output_streams[i]) was elided by this extraction. */
2541 
2542  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2543  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2544  continue;
2545 
2546  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2547  }
2548 
2549 out:
2550  if (free_sub)
2551  avsubtitle_free(&subtitle);
2552  return ret;
2553 }
2554 
/* Send EOF (at the stream's current pts, rescaled to stream time base) to
 * every filtergraph input fed by this input stream.
 * NOTE(review): the function signature was on original line 2555, elided by
 * this extraction — presumably `static int send_filter_eof(InputStream *ist)`;
 * confirm upstream. */
2556 {
2557  int i, ret;
2558  /* TODO keep pts also in stream time base to avoid converting back */
2559  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
 /* NOTE(review): original line 2560 (the rounding-flags argument closing
  * this call) was elided by this extraction. */
2561 
2562  for (i = 0; i < ist->nb_filters; i++) {
2563  ret = ifilter_send_eof(ist->filters[i], pts);
2564  if (ret < 0)
2565  return ret;
2566  }
2567  return 0;
2568 }
2569 
/* Central per-packet dispatcher: initializes stream timestamps, runs the
 * decode loop (audio/video/subtitle) until the decoder is drained for this
 * packet, sends filter EOF after a full drain, advances timestamps for
 * stream copy, and forwards the packet to all stream-copy outputs.
 * Returns 0 when EOF was reached on this stream, non-zero otherwise. */
2570 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2571 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2572 {
2573  int ret = 0, i;
2574  int repeating = 0;
2575  int eof_reached = 0;
2576 
2577  AVPacket avpkt;
 /* First packet of the stream: seed dts/pts. */
2578  if (!ist->saw_first_ts) {
2579  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2580  ist->pts = 0;
2581  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2582  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2583  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
2584  }
2585  ist->saw_first_ts = 1;
2586  }
2587 
2588  if (ist->next_dts == AV_NOPTS_VALUE)
2589  ist->next_dts = ist->dts;
2590  if (ist->next_pts == AV_NOPTS_VALUE)
2591  ist->next_pts = ist->pts;
2592 
2593  if (!pkt) {
2594  /* EOF handling */
2595  av_init_packet(&avpkt);
2596  avpkt.data = NULL;
2597  avpkt.size = 0;
2598  } else {
2599  avpkt = *pkt;
2600  }
2601 
2602  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2603  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2604  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2605  ist->next_pts = ist->pts = ist->dts;
2606  }
2607 
2608  // while we have more to decode or while the decoder did output something on EOF
2609  while (ist->decoding_needed) {
2610  int64_t duration_dts = 0;
2611  int64_t duration_pts = 0;
2612  int got_output = 0;
2613  int decode_failed = 0;
2614 
2615  ist->pts = ist->next_pts;
2616  ist->dts = ist->next_dts;
2617 
2618  switch (ist->dec_ctx->codec_type) {
2619  case AVMEDIA_TYPE_AUDIO:
2620  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
2621  &decode_failed);
2622  break;
2623  case AVMEDIA_TYPE_VIDEO:
2624  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
2625  &decode_failed);
2626  if (!repeating || !pkt || got_output) {
2627  if (pkt && pkt->duration) {
2628  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2629  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): original lines 2630/2633 (the `ticks` computation
  * from the parser's repeat_pict and the framerate numerator in the
  * denominator of this expression) were elided by this extraction. */
2631  duration_dts = ((int64_t)AV_TIME_BASE *
2632  ist->dec_ctx->framerate.den * ticks) /
2634  }
2635 
2636  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
2637  ist->next_dts += duration_dts;
2638  }else
2639  ist->next_dts = AV_NOPTS_VALUE;
2640  }
2641 
 /* Prefer the decoded frame's own duration over the dts estimate. */
2642  if (got_output) {
2643  if (duration_pts > 0) {
2644  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
2645  } else {
2646  ist->next_pts += duration_dts;
2647  }
2648  }
2649  break;
2650  case AVMEDIA_TYPE_SUBTITLE:
2651  if (repeating)
2652  break;
2653  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
2654  if (!pkt && ret >= 0)
2655  ret = AVERROR_EOF;
2656  break;
2657  default:
2658  return -1;
2659  }
2660 
2661  if (ret == AVERROR_EOF) {
2662  eof_reached = 1;
2663  break;
2664  }
2665 
2666  if (ret < 0) {
2667  if (decode_failed) {
2668  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2669  ist->file_index, ist->st->index, av_err2str(ret));
2670  } else {
2671  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
2672  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
2673  }
2674  if (!decode_failed || exit_on_error)
2675  exit_program(1);
2676  break;
2677  }
2678 
2679  if (got_output)
2680  ist->got_output = 1;
2681 
2682  if (!got_output)
2683  break;
2684 
2685  // During draining, we might get multiple output frames in this loop.
2686  // ffmpeg.c does not drain the filter chain on configuration changes,
2687  // which means if we send multiple frames at once to the filters, and
2688  // one of those frames changes configuration, the buffered frames will
2689  // be lost. This can upset certain FATE tests.
2690  // Decode only 1 frame per call on EOF to appease these FATE tests.
2691  // The ideal solution would be to rewrite decoding to use the new
2692  // decoding API in a better way.
2693  if (!pkt)
2694  break;
2695 
2696  repeating = 1;
2697  }
2698 
2699  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2700  /* except when looping we need to flush but not to send an EOF */
2701  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2702  int ret = send_filter_eof(ist);
2703  if (ret < 0) {
2704  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2705  exit_program(1);
2706  }
2707  }
2708 
2709  /* handle stream copy */
2710  if (!ist->decoding_needed && pkt) {
2711  ist->dts = ist->next_dts;
2712  switch (ist->dec_ctx->codec_type) {
2713  case AVMEDIA_TYPE_AUDIO:
2714  av_assert1(pkt->duration >= 0);
2715  if (ist->dec_ctx->sample_rate) {
2716  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2717  ist->dec_ctx->sample_rate;
2718  } else {
 /* NOTE(review): original line 2719 (the fallback dts advance when
  * the decoder reports no sample rate) was elided by this extraction. */
2720  }
2721  break;
2722  case AVMEDIA_TYPE_VIDEO:
2723  if (ist->framerate.num) {
2724  // TODO: Remove work-around for c99-to-c89 issue 7
2725  AVRational time_base_q = AV_TIME_BASE_Q;
2726  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2727  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2728  } else if (pkt->duration) {
 /* NOTE(review): original line 2729 (advancing next_dts by the packet
  * duration rescaled to AV_TIME_BASE) was elided by this extraction. */
2730  } else if(ist->dec_ctx->framerate.num != 0) {
2731  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2732  ist->next_dts += ((int64_t)AV_TIME_BASE *
2733  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): original line 2734 (the framerate numerator in the
  * denominator of this expression) was elided by this extraction. */
2735  }
2736  break;
2737  }
2738  ist->pts = ist->dts;
2739  ist->next_pts = ist->next_dts;
2740  }
2741  for (i = 0; i < nb_output_streams; i++) {
 /* NOTE(review): original line 2742 (declaring `ost` from
  * output_streams[i]) was elided by this extraction. */
2743 
2744  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2745  continue;
2746 
2747  do_streamcopy(ist, ost, pkt);
2748  }
2749 
2750  return !eof_reached;
2751 }
2752 
/* Build an SDP description covering all RTP output muxers and either print
 * it to stdout or write it to -sdp_file.  Does nothing until every output
 * file's header has been written. */
2753 static void print_sdp(void)
2754 {
2755  char sdp[16384];
2756  int i;
2757  int j;
2758  AVIOContext *sdp_pb;
2759  AVFormatContext **avc;
2760 
2761  for (i = 0; i < nb_output_files; i++) {
2762  if (!output_files[i]->header_written)
2763  return;
2764  }
2765 
2766  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2767  if (!avc)
2768  exit_program(1);
 /* Collect only the RTP muxer contexts; j counts how many were found. */
2769  for (i = 0, j = 0; i < nb_output_files; i++) {
2770  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2771  avc[j] = output_files[i]->ctx;
2772  j++;
2773  }
2774  }
2775 
2776  if (!j)
2777  goto fail;
2778 
2779  av_sdp_create(avc, j, sdp, sizeof(sdp));
2780 
2781  if (!sdp_filename) {
2782  printf("SDP:\n%s\n", sdp);
2783  fflush(stdout);
2784  } else {
2785  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2786  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2787  } else {
2788  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2789  avio_closep(&sdp_pb);
 /* NOTE(review): original line 2790 was elided by this extraction. */
2791  }
2792  }
2793 
2794 fail:
2795  av_freep(&avc);
2796 }
2797 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first one for which the requested hwaccel can be
 * initialized; fall back to the first non-hwaccel format otherwise.
 * NOTE(review): the signature was on original line 2798, elided by this
 * extraction — presumably
 * `static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)`;
 * confirm upstream. */
2799 {
2800  InputStream *ist = s->opaque;
2801  const enum AVPixelFormat *p;
2802  int ret;
2803 
2804  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 /* NOTE(review): original line 2805 (fetching the AVPixFmtDescriptor
  * `desc` for *p) was elided by this extraction. */
2806  const AVCodecHWConfig *config = NULL;
2807  int i;
2808 
 /* Software formats terminate the search: take the first one offered. */
2809  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2810  break;
2811 
2812  if (ist->hwaccel_id == HWACCEL_GENERIC ||
2813  ist->hwaccel_id == HWACCEL_AUTO) {
2814  for (i = 0;; i++) {
2815  config = avcodec_get_hw_config(s->codec, i);
2816  if (!config)
2817  break;
2818  if (!(config->methods &
 /* NOTE(review): original line 2819 (the hw-config method flag being
  * tested) was elided by this extraction. */
2820  continue;
2821  if (config->pix_fmt == *p)
2822  break;
2823  }
2824  }
2825  if (config) {
2826  if (config->device_type != ist->hwaccel_device_type) {
2827  // Different hwaccel offered, ignore.
2828  continue;
2829  }
2830 
 /* NOTE(review): original line 2831 (the init call setting `ret` for
  * this hw config) was elided by this extraction. */
2832  if (ret < 0) {
2833  if (ist->hwaccel_id == HWACCEL_GENERIC) {
 /* NOTE(review): original lines 2834/2837 (the av_log call opening
  * this message and its first argument) were elided. */
2835  "%s hwaccel requested for input stream #%d:%d, "
2836  "but cannot be initialized.\n",
2838  ist->file_index, ist->st->index);
2839  return AV_PIX_FMT_NONE;
2840  }
2841  continue;
2842  }
2843  } else {
 /* Legacy path: look the format up in the static hwaccels[] table. */
2844  const HWAccel *hwaccel = NULL;
2845  int i;
2846  for (i = 0; hwaccels[i].name; i++) {
2847  if (hwaccels[i].pix_fmt == *p) {
2848  hwaccel = &hwaccels[i];
2849  break;
2850  }
2851  }
2852  if (!hwaccel) {
2853  // No hwaccel supporting this pixfmt.
2854  continue;
2855  }
2856  if (hwaccel->id != ist->hwaccel_id) {
2857  // Does not match requested hwaccel.
2858  continue;
2859  }
2860 
2861  ret = hwaccel->init(s);
2862  if (ret < 0) {
 /* NOTE(review): original line 2863 (the av_log call opening this
  * message) was elided by this extraction. */
2864  "%s hwaccel requested for input stream #%d:%d, "
2865  "but cannot be initialized.\n", hwaccel->name,
2866  ist->file_index, ist->st->index);
2867  return AV_PIX_FMT_NONE;
2868  }
2869  }
2870 
2871  if (ist->hw_frames_ctx) {
2872  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2873  if (!s->hw_frames_ctx)
2874  return AV_PIX_FMT_NONE;
2875  }
2876 
2877  ist->hwaccel_pix_fmt = *p;
2878  break;
2879  }
2880 
2881  return *p;
2882 }
2883 
/* AVCodecContext.get_buffer2 callback: delegate to the active hwaccel's
 * buffer allocator for hwaccel frames.
 * NOTE(review): the signature was on original line 2884, elided by this
 * extraction — presumably
 * `static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)`;
 * line 2891 (the default fallback return) was elided too. Confirm upstream. */
2885 {
2886  InputStream *ist = s->opaque;
2887 
2888  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2889  return ist->hwaccel_get_buffer(s, frame, flags);
2890 
2892 }
2893 
/* Open the decoder for one input stream (if decoding is needed): install the
 * get_format/get_buffer callbacks, apply decoder options, set up the hw
 * device, and call avcodec_open2.  On failure writes a human-readable
 * message into `error` and returns a negative AVERROR code. */
2894 static int init_input_stream(int ist_index, char *error, int error_len)
2895 {
2896  int ret;
2897  InputStream *ist = input_streams[ist_index];
2898 
2899  if (ist->decoding_needed) {
2900  AVCodec *codec = ist->dec;
2901  if (!codec) {
2902  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2903  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2904  return AVERROR(EINVAL);
2905  }
2906 
2907  ist->dec_ctx->opaque = ist;
2908  ist->dec_ctx->get_format = get_format;
2909  ist->dec_ctx->get_buffer2 = get_buffer;
2910  ist->dec_ctx->thread_safe_callbacks = 1;
2911 
2912  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2913  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2914  (ist->decoding_needed & DECODING_FOR_OST)) {
2915  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): original line 2916 (the DECODING_FOR_FILTER condition
  * guarding this warning) was elided by this extraction. */
2917  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2918  }
2919 
2920  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2921 
2922  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2923  * audio, and video decoders such as cuvid or mediacodec */
2924  ist->dec_ctx->pkt_timebase = ist->st->time_base;
2925 
2926  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2927  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2928  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
 /* NOTE(review): original line 2929 (the attached-pic condition guarding
  * this single-thread override) was elided by this extraction. */
2930  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
2931 
 /* NOTE(review): original line 2932 (the hw-device setup call setting
  * `ret`) was elided by this extraction. */
2933  if (ret < 0) {
2934  snprintf(error, error_len, "Device setup failed for "
2935  "decoder on input stream #%d:%d : %s",
2936  ist->file_index, ist->st->index, av_err2str(ret));
2937  return ret;
2938  }
2939 
2940  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2941  if (ret == AVERROR_EXPERIMENTAL)
2942  abort_codec_experimental(codec, 0);
2943 
2944  snprintf(error, error_len,
2945  "Error while opening decoder for input stream "
2946  "#%d:%d : %s",
2947  ist->file_index, ist->st->index, av_err2str(ret));
2948  return ret;
2949  }
 /* NOTE(review): original line 2950 (presumably the leftover-options
  * check after avcodec_open2) was elided by this extraction. */
2951  }
2952 
2953  ist->next_pts = AV_NOPTS_VALUE;
2954  ist->next_dts = AV_NOPTS_VALUE;
2955 
2956  return 0;
2957 }
2958 
/* Return the input stream feeding this output stream, or NULL when the
 * output has no direct source (e.g. it is fed by a complex filtergraph).
 * NOTE(review): the signature was on original line 2959, elided by this
 * extraction — presumably
 * `static InputStream *get_input_stream(OutputStream *ost)`; confirm. */
2960 {
2961  if (ost->source_index >= 0)
2962  return input_streams[ost->source_index];
2963  return NULL;
2964 }
2965 
/* qsort()-compatible three-way comparator for int64_t values.
 * Returns -1, 0 or 1; avoids overflow that plain subtraction would risk. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t va = *(const int64_t *)a;
    const int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
2970 
/* Write the output file's header once every stream in it has been
 * initialized, print the SDP if requested, then drain each stream's muxing
 * queue of packets buffered before the muxer was ready.
 * Returns 0 on success or a negative AVERROR from avformat_write_header. */
2971 /* open the muxer when all the streams are initialized */
2972 static int check_init_output_file(OutputFile *of, int file_index)
2973 {
2974  int ret, i;
2975 
2976  for (i = 0; i < of->ctx->nb_streams; i++) {
 /* NOTE(review): original line 2977 (declaring `ost` for stream i) was
  * elided by this extraction. */
2978  if (!ost->initialized)
2979  return 0;
2980  }
2981 
2982  of->ctx->interrupt_callback = int_cb;
2983 
2984  ret = avformat_write_header(of->ctx, &of->opts);
2985  if (ret < 0) {
 /* NOTE(review): original line 2986 (the av_log call opening this
  * message) was elided by this extraction. */
2987  "Could not write header for output file #%d "
2988  "(incorrect codec parameters ?): %s\n",
2989  file_index, av_err2str(ret));
2990  return ret;
2991  }
2992  //assert_avoptions(of->opts);
2993  of->header_written = 1;
2994 
2995  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
2996 
2997  if (sdp_filename || want_sdp)
2998  print_sdp();
2999 
3000  /* flush the muxing queues */
3001  for (i = 0; i < of->ctx->nb_streams; i++) {
 /* NOTE(review): original line 3002 (declaring `ost` for stream i) was
  * elided by this extraction. */
3003 
3004  /* try to improve muxing time_base (only possible if nothing has been written yet) */
3005  if (!av_fifo_size(ost->muxing_queue))
3006  ost->mux_timebase = ost->st->time_base;
3007 
3008  while (av_fifo_size(ost->muxing_queue)) {
3009  AVPacket pkt;
3010  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
3011  write_packet(of, &pkt, ost, 1);
3012  }
3013  }
3014 
3015  return 0;
3016 }
3017 
/* Initialize the chain of bitstream filters attached to an output stream:
 * copy codec parameters and time base through each filter in order, then
 * write the final filter's output parameters/time base back to the stream.
 * NOTE(review): the signature was on original line 3018, elided by this
 * extraction — presumably `static int init_output_bsfs(OutputStream *ost)`;
 * confirm upstream. */
3019 {
3020  AVBSFContext *ctx;
3021  int i, ret;
3022 
3023  if (!ost->nb_bitstream_filters)
3024  return 0;
3025 
3026  for (i = 0; i < ost->nb_bitstream_filters; i++) {
3027  ctx = ost->bsf_ctx[i];
3028 
 /* Each filter's input comes from the previous filter's output
  * (or the stream's codecpar for the first one). */
3029  ret = avcodec_parameters_copy(ctx->par_in,
3030  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
3031  if (ret < 0)
3032  return ret;
3033 
3034  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
3035 
3036  ret = av_bsf_init(ctx);
3037  if (ret < 0) {
3038  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3039  ost->bsf_ctx[i]->filter->name);
3040  return ret;
3041  }
3042  }
3043 
3044  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
3045  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3046  if (ret < 0)
3047  return ret;
3048 
3049  ost->st->time_base = ctx->time_base_out;
3050 
3051  return 0;
3052 }
3053 
/* Initialize an output stream in stream-copy mode: carry codec parameters,
 * codec tag, frame rate, time base, duration, disposition, side data and
 * sample aspect ratio over from the source input stream, with per-codec-type
 * fixups.  Returns 0 on success or a negative AVERROR code.
 * NOTE(review): the signature was on original line 3054, elided by this
 * extraction — presumably
 * `static int init_output_stream_streamcopy(OutputStream *ost)`; confirm. */
3055 {
3056  OutputFile *of = output_files[ost->file_index];
 /* NOTE(review): original line 3057 (declaring `ist` via
  * get_input_stream(ost)) was elided by this extraction. */
3058  AVCodecParameters *par_dst = ost->st->codecpar;
3059  AVCodecParameters *par_src = ost->ref_par;
3060  AVRational sar;
3061  int i, ret;
3062  uint32_t codec_tag = par_dst->codec_tag;
3063 
3064  av_assert0(ist && !ost->filter);
3065 
3066  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3067  if (ret >= 0)
3068  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3069  if (ret < 0) {
 /* NOTE(review): original line 3070 (the av_log call opening this
  * message) was elided by this extraction. */
3071  "Error setting up codec context options.\n");
3072  return ret;
3073  }
3074 
3075  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3076  if (ret < 0) {
 /* NOTE(review): original line 3077 (the av_log call opening this
  * message) was elided by this extraction. */
3078  "Error getting reference codec parameters.\n");
3079  return ret;
3080  }
3081 
 /* Keep the source codec tag only when the target container can
  * represent it (or has no tag table at all). */
3082  if (!codec_tag) {
3083  unsigned int codec_tag_tmp;
3084  if (!of->ctx->oformat->codec_tag ||
3085  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3086  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3087  codec_tag = par_src->codec_tag;
3088  }
3089 
3090  ret = avcodec_parameters_copy(par_dst, par_src);
3091  if (ret < 0)
3092  return ret;
3093 
3094  par_dst->codec_tag = codec_tag;
3095 
3096  if (!ost->frame_rate.num)
3097  ost->frame_rate = ist->framerate;
3098  ost->st->avg_frame_rate = ost->frame_rate;
3099 
 /* NOTE(review): original line 3100 (the call transferring internal
  * stream timing info, setting `ret`) was elided by this extraction. */
3101  if (ret < 0)
3102  return ret;
3103 
3104  // copy timebase while removing common factors
3105  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* NOTE(review): original line 3106 (the time-base assignment guarded by
  * this condition) was elided by this extraction. */
3107 
3108  // copy estimated duration as a hint to the muxer
3109  if (ost->st->duration <= 0 && ist->st->duration > 0)
3110  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3111 
3112  // copy disposition
3113  ost->st->disposition = ist->st->disposition;
3114 
3115  if (ist->st->nb_side_data) {
3116  for (i = 0; i < ist->st->nb_side_data; i++) {
3117  const AVPacketSideData *sd_src = &ist->st->side_data[i];
3118  uint8_t *dst_data;
3119 
3120  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3121  if (!dst_data)
3122  return AVERROR(ENOMEM);
3123  memcpy(dst_data, sd_src->data, sd_src->size);
3124  }
3125  }
3126 
3127  if (ost->rotate_overridden) {
 /* NOTE(review): original line 3128 (declaring `sd` via
  * av_stream_new_side_data for a display matrix) was elided. */
3129  sizeof(int32_t) * 9);
3130  if (sd)
3131  av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3132  }
3133 
3134  switch (par_dst->codec_type) {
3135  case AVMEDIA_TYPE_AUDIO:
3136  if (audio_volume != 256) {
3137  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3138  exit_program(1);
3139  }
3140  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3141  par_dst->block_align= 0;
3142  if(par_dst->codec_id == AV_CODEC_ID_AC3)
3143  par_dst->block_align= 0;
3144  break;
3145  case AVMEDIA_TYPE_VIDEO:
3146  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3147  sar =
3148  av_mul_q(ost->frame_aspect_ratio,
3149  (AVRational){ par_dst->height, par_dst->width });
3150  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3151  "with stream copy may produce invalid files\n");
3152  }
3153  else if (ist->st->sample_aspect_ratio.num)
3154  sar = ist->st->sample_aspect_ratio;
3155  else
3156  sar = par_src->sample_aspect_ratio;
3157  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3158  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3159  ost->st->r_frame_rate = ist->st->r_frame_rate;
3160  break;
3161  }
3162 
3163  ost->mux_timebase = ist->st->time_base;
3164 
3165  return 0;
3166 }
3167 
/* Set the "encoder" metadata tag on an output stream to the libavcodec
 * ident plus encoder name, unless the user already set one, honoring
 * bitexact flags (muxer fflags and codec flags) by using a short "Lavc"
 * prefix in that case.
 * NOTE(review): the signature was on original line 3168, elided by this
 * extraction — presumably
 * `static void set_encoder_id(OutputFile *of, OutputStream *ost)`; confirm. */
3169 {
3170  AVDictionaryEntry *e;
3171 
3172  uint8_t *encoder_string;
3173  int encoder_string_len;
3174  int format_flags = 0;
3175  int codec_flags = ost->enc_ctx->flags;
3176 
3177  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3178  return;
3179 
 /* Evaluate -fflags / codec -flags strings to detect bitexact mode. */
3180  e = av_dict_get(of->opts, "fflags", NULL, 0);
3181  if (e) {
3182  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3183  if (!o)
3184  return;
3185  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3186  }
3187  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3188  if (e) {
3189  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3190  if (!o)
3191  return;
3192  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3193  }
3194 
3195  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3196  encoder_string = av_mallocz(encoder_string_len);
3197  if (!encoder_string)
3198  exit_program(1);
3199 
3200  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3201  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3202  else
3203  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3204  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3205  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): original line 3206 (the flags argument closing this
  * av_dict_set call) was elided by this extraction. */
3207 }
3208 
/* Parse the -force_key_frames specification `kf` (comma-separated times, or
 * "chapters[+offset]") into a sorted array of pts in the encoder time base,
 * stored in ost->forced_kf_pts / ost->forced_kf_count.
 * NOTE(review): the first line of the signature was on original line 3209,
 * elided by this extraction — presumably
 * `static void parse_forced_key_frames(char *kf, OutputStream *ost,`;
 * confirm upstream. */
3210  AVCodecContext *avctx)
3211 {
3212  char *p;
3213  int n = 1, i, size, index = 0;
3214  int64_t t, *pts;
3215 
 /* Count comma-separated entries to size the initial array. */
3216  for (p = kf; *p; p++)
3217  if (*p == ',')
3218  n++;
3219  size = n;
3220  pts = av_malloc_array(size, sizeof(*pts));
3221  if (!pts) {
3222  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3223  exit_program(1);
3224  }
3225 
3226  p = kf;
3227  for (i = 0; i < n; i++) {
3228  char *next = strchr(p, ',');
3229 
3230  if (next)
3231  *next++ = 0;
3232 
3233  if (!memcmp(p, "chapters", 8)) {
3234 
 /* "chapters[+offset]": one keyframe per chapter start plus offset. */
3235  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3236  int j;
3237 
3238  if (avf->nb_chapters > INT_MAX - size ||
3239  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3240  sizeof(*pts)))) {
 /* NOTE(review): original line 3241 (the av_log call opening this
  * message) was elided by this extraction. */
3242  "Could not allocate forced key frames array.\n");
3243  exit_program(1);
3244  }
3245  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3246  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3247 
3248  for (j = 0; j < avf->nb_chapters; j++) {
3249  AVChapter *c = avf->chapters[j];
3250  av_assert1(index < size);
3251  pts[index++] = av_rescale_q(c->start, c->time_base,
3252  avctx->time_base) + t;
3253  }
3254 
3255  } else {
3256 
3257  t = parse_time_or_die("force_key_frames", p, 1);
3258  av_assert1(index < size);
3259  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3260 
3261  }
3262 
3263  p = next;
3264  }
3265 
3266  av_assert0(index == size);
3267  qsort(pts, size, sizeof(*pts), compare_int64);
3268  ost->forced_kf_count = size;
3269  ost->forced_kf_pts = pts;
3270 }
3271 
3272 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3273 {
3275  AVCodecContext *enc_ctx = ost->enc_ctx;
3276  AVFormatContext *oc;
3277 
3278  if (ost->enc_timebase.num > 0) {
3279  enc_ctx->time_base = ost->enc_timebase;
3280  return;
3281  }
3282 
3283  if (ost->enc_timebase.num < 0) {
3284  if (ist) {
3285  enc_ctx->time_base = ist->st->time_base;
3286  return;
3287  }
3288 
3289  oc = output_files[ost->file_index]->ctx;
3290  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3291  }
3292 
3293  enc_ctx->time_base = default_time_base;
3294 }
3295 
3297 {
3299  AVCodecContext *enc_ctx = ost->enc_ctx;
3301  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3302  int j, ret;
3303 
3304  set_encoder_id(output_files[ost->file_index], ost);
3305 
3306  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3307  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3308  // which have to be filtered out to prevent leaking them to output files.
3309  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3310 
3311  if (ist) {
3312  ost->st->disposition = ist->st->disposition;
3313 
3314  dec_ctx = ist->dec_ctx;
3315 
3317  } else {
3318  for (j = 0; j < oc->nb_streams; j++) {
3319  AVStream *st = oc->streams[j];
3320  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3321  break;
3322  }
3323  if (j == oc->nb_streams)
3324  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3327  }
3328 
3329  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3330  if (!ost->frame_rate.num)
3331  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3332  if (ist && !ost->frame_rate.num)
3333  ost->frame_rate = ist->framerate;
3334  if (ist && !ost->frame_rate.num)
3335  ost->frame_rate = ist->st->r_frame_rate;
3336  if (ist && !ost->frame_rate.num) {
3337  ost->frame_rate = (AVRational){25, 1};
3339  "No information "
3340  "about the input framerate is available. Falling "
3341  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3342  "if you want a different framerate.\n",
3343  ost->file_index, ost->index);
3344  }
3345 
3346  if (ost->enc->supported_framerates && !ost->force_fps) {
3347  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3348  ost->frame_rate = ost->enc->supported_framerates[idx];
3349  }
3350  // reduce frame rate for mpeg4 to be within the spec limits
3351  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3352  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3353  ost->frame_rate.num, ost->frame_rate.den, 65535);
3354  }
3355  }
3356 
3357  switch (enc_ctx->codec_type) {
3358  case AVMEDIA_TYPE_AUDIO:
3359  enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
3360  if (dec_ctx)
3362  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3363  enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
3364  enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
3365  enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);
3366 
3368  break;
3369 
3370  case AVMEDIA_TYPE_VIDEO:
3371  init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
3372 
3373  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3374  enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
3375  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3377  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3378  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3379  }
3380  for (j = 0; j < ost->forced_kf_count; j++)
3381  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3383  enc_ctx->time_base);
3384 
3385  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3386  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3387  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3388  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3389  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3390  av_buffersink_get_sample_aspect_ratio(ost->filter->filter);
3391 
3392  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3393  if (dec_ctx)
3395  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3396 
3397  enc_ctx->framerate = ost->frame_rate;
3398 
3399  ost->st->avg_frame_rate = ost->frame_rate;
3400 
3401  if (!dec_ctx ||
3402  enc_ctx->width != dec_ctx->width ||
3403  enc_ctx->height != dec_ctx->height ||
3404  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3406  }
3407 
3408  if (ost->top_field_first == 0) {
3409  enc_ctx->field_order = AV_FIELD_BB;
3410  } else if (ost->top_field_first == 1) {
3411  enc_ctx->field_order = AV_FIELD_TT;
3412  }
3413 
3414  if (ost->forced_keyframes) {
3415  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3416  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3418  if (ret < 0) {
3420  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3421  return ret;
3422  }
3423  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3424  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3425  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3426  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3427 
3428  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3429  // parse it only for static kf timings
3430  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3431  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3432  }
3433  }
3434  break;
3435  case AVMEDIA_TYPE_SUBTITLE:
3436  enc_ctx->time_base = AV_TIME_BASE_Q;
3437  if (!enc_ctx->width) {
3438  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3439  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3440  }
3441  break;
3442  case AVMEDIA_TYPE_DATA:
3443  break;
3444  default:
3445  abort();
3446  break;
3447  }
3448 
3449  ost->mux_timebase = enc_ctx->time_base;
3450 
3451  return 0;
3452 }
3453 
3454 static int init_output_stream(OutputStream *ost, char *error, int error_len)
3455 {
3456  int ret = 0;
3457 
3458  if (ost->encoding_needed) {
3459  AVCodec *codec = ost->enc;
3460  AVCodecContext *dec = NULL;
3461  InputStream *ist;
3462 
3464  if (ret < 0)
3465  return ret;
3466 
3467  if ((ist = get_input_stream(ost)))
3468  dec = ist->dec_ctx;
3469  if (dec && dec->subtitle_header) {
3470  /* ASS code assumes this buffer is null terminated so add extra byte. */
3471  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3472  if (!ost->enc_ctx->subtitle_header)
3473  return AVERROR(ENOMEM);
3474  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3475  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3476  }
3477  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3478  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3479  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3480  !codec->defaults &&
3481  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3482  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3483  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3484 
3485  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3486  ((AVHWFramesContext*)av_buffersink_get_hw_frames_ctx(ost->filter->filter)->data)->format ==
3487  av_buffersink_get_format(ost->filter->filter)) {
3488  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
3489  if (!ost->enc_ctx->hw_frames_ctx)
3490  return AVERROR(ENOMEM);
3491  } else {
3493  if (ret < 0) {
3494  snprintf(error, error_len, "Device setup failed for "
3495  "encoder on output stream #%d:%d : %s",
3496  ost->file_index, ost->index, av_err2str(ret));
3497  return ret;
3498  }
3499  }
3500  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3501  int input_props = 0, output_props = 0;
3502  AVCodecDescriptor const *input_descriptor =
3504  AVCodecDescriptor const *output_descriptor =
3505  avcodec_descriptor_get(ost->enc_ctx->codec_id);
3506  if (input_descriptor)
3507  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3508  if (output_descriptor)
3509  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3510  if (input_props && output_props && input_props != output_props) {
3511  snprintf(error, error_len,
3512  "Subtitle encoding currently only possible from text to text "
3513  "or bitmap to bitmap");
3514  return AVERROR_INVALIDDATA;
3515  }
3516  }
3517 
3518  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3519  if (ret == AVERROR_EXPERIMENTAL)
3520  abort_codec_experimental(codec, 1);
3521  snprintf(error, error_len,
3522  "Error while opening encoder for output stream #%d:%d - "
3523  "maybe incorrect parameters such as bit_rate, rate, width or height",
3524  ost->file_index, ost->index);
3525  return ret;
3526  }
3527  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3528  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3529  av_buffersink_set_frame_size(ost->filter->filter,
3530  ost->enc_ctx->frame_size);
3531  assert_avoptions(ost->encoder_opts);
3532  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3533  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3534  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3535  " It takes bits/s as argument, not kbits/s\n");
3536 
3538  if (ret < 0) {
3540  "Error initializing the output stream codec context.\n");
3541  exit_program(1);
3542  }
3543  /*
3544  * FIXME: ost->st->codec should't be needed here anymore.
3545  */
3546  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3547  if (ret < 0)
3548  return ret;
3549 
3550  if (ost->enc_ctx->nb_coded_side_data) {
3551  int i;
3552 
3553  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3554  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3555  uint8_t *dst_data;
3556 
3557  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3558  if (!dst_data)
3559  return AVERROR(ENOMEM);
3560  memcpy(dst_data, sd_src->data, sd_src->size);
3561  }
3562  }
3563 
3564  /*
3565  * Add global input side data. For now this is naive, and copies it
3566  * from the input stream's global side data. All side data should
3567  * really be funneled over AVFrame and libavfilter, then added back to
3568  * packet side data, and then potentially using the first packet for
3569  * global side data.
3570  */
3571  if (ist) {
3572  int i;
3573  for (i = 0; i < ist->st->nb_side_data; i++) {
3574  AVPacketSideData *sd = &ist->st->side_data[i];
3575  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3576  if (!dst)
3577  return AVERROR(ENOMEM);
3578  memcpy(dst, sd->data, sd->size);
3579  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3580  av_display_rotation_set((uint32_t *)dst, 0);
3581  }
3582  }
3583 
3584  // copy timebase while removing common factors
3585  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3586  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3587 
3588  // copy estimated duration as a hint to the muxer
3589  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3590  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3591 
3592  ost->st->codec->codec= ost->enc_ctx->codec;
3593  } else if (ost->stream_copy) {
3595  if (ret < 0)
3596  return ret;
3597  }
3598 
3599  // parse user provided disposition, and update stream values
3600  if (ost->disposition) {
3601  static const AVOption opts[] = {
3602  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3603  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3604  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3605  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3606  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3607  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3608  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3609  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3610  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3611  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3612  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3613  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3614  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3615  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3616  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3617  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3618  { NULL },
3619  };
3620  static const AVClass class = {
3621  .class_name = "",
3622  .item_name = av_default_item_name,
3623  .option = opts,
3624  .version = LIBAVUTIL_VERSION_INT,
3625  };
3626  const AVClass *pclass = &class;
3627 
3628  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3629  if (ret < 0)
3630  return ret;
3631  }
3632 
3633  /* initialize bitstream filters for the output stream
3634  * needs to be done here, because the codec id for streamcopy is not
3635  * known until now */
3637  if (ret < 0)
3638  return ret;
3639 
3640  ost->initialized = 1;
3641 
3642  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3643  if (ret < 0)
3644  return ret;
3645 
3646  return ret;
3647 }
3648 
3649 static void report_new_stream(int input_index, AVPacket *pkt)
3650 {
3651  InputFile *file = input_files[input_index];
3652  AVStream *st = file->ctx->streams[pkt->stream_index];
3653 
3654  if (pkt->stream_index < file->nb_streams_warn)
3655  return;
3656  av_log(file->ctx, AV_LOG_WARNING,
3657  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3659  input_index, pkt->stream_index,
3660  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3661  file->nb_streams_warn = pkt->stream_index + 1;
3662 }
3663 
3664 static int transcode_init(void)
3665 {
3666  int ret = 0, i, j, k;
3667  AVFormatContext *oc;
3668  OutputStream *ost;
3669  InputStream *ist;
3670  char error[1024] = {0};
3671 
3672  for (i = 0; i < nb_filtergraphs; i++) {
3673  FilterGraph *fg = filtergraphs[i];
3674  for (j = 0; j < fg->nb_outputs; j++) {
3675  OutputFilter *ofilter = fg->outputs[j];
3676  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3677  continue;
3678  if (fg->nb_inputs != 1)
3679  continue;
3680  for (k = nb_input_streams-1; k >= 0 ; k--)
3681  if (fg->inputs[0]->ist == input_streams[k])
3682  break;
3683  ofilter->ost->source_index = k;
3684  }
3685  }
3686 
3687  /* init framerate emulation */
3688  for (i = 0; i < nb_input_files; i++) {
3690  if (ifile->rate_emu)
3691  for (j = 0; j < ifile->nb_streams; j++)
3692  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3693  }
3694 
3695  /* init input streams */
3696  for (i = 0; i < nb_input_streams; i++)
3697  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3698  for (i = 0; i < nb_output_streams; i++) {
3699  ost = output_streams[i];
3700  avcodec_close(ost->enc_ctx);
3701  }
3702  goto dump_format;
3703  }
3704 
3705  /* open each encoder */
3706  for (i = 0; i < nb_output_streams; i++) {
3707  // skip streams fed from filtergraphs until we have a frame for them
3708  if (output_streams[i]->filter)
3709  continue;
3710 
3712  if (ret < 0)
3713  goto dump_format;
3714  }
3715 
3716  /* discard unused programs */
3717  for (i = 0; i < nb_input_files; i++) {
3719  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3720  AVProgram *p = ifile->ctx->programs[j];
3721  int discard = AVDISCARD_ALL;
3722 
3723  for (k = 0; k < p->nb_stream_indexes; k++)
3724  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3725  discard = AVDISCARD_DEFAULT;
3726  break;
3727  }
3728  p->discard = discard;
3729  }
3730  }
3731 
3732  /* write headers for files with no streams */
3733  for (i = 0; i < nb_output_files; i++) {
3734  oc = output_files[i]->ctx;
3735  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3737  if (ret < 0)
3738  goto dump_format;
3739  }
3740  }
3741 
3742  dump_format:
3743  /* dump the stream mapping */
3744  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3745  for (i = 0; i < nb_input_streams; i++) {
3746  ist = input_streams[i];
3747 
3748  for (j = 0; j < ist->nb_filters; j++) {
3749  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3750  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3751  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3752  ist->filters[j]->name);
3753  if (nb_filtergraphs > 1)
3754  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3755  av_log(NULL, AV_LOG_INFO, "\n");
3756  }
3757  }
3758  }
3759 
3760  for (i = 0; i < nb_output_streams; i++) {
3761  ost = output_streams[i];
3762 
3763  if (ost->attachment_filename) {
3764  /* an attached file */
3765  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3766  ost->attachment_filename, ost->file_index, ost->index);
3767  continue;
3768  }
3769 
3770  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3771  /* output from a complex graph */
3772  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3773  if (nb_filtergraphs > 1)
3774  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3775 
3776  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3777  ost->index, ost->enc ? ost->enc->name : "?");
3778  continue;
3779  }
3780 
3781  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3782  input_streams[ost->source_index]->file_index,
3783  input_streams[ost->source_index]->st->index,
3784  ost->file_index,
3785  ost->index);
3786  if (ost->sync_ist != input_streams[ost->source_index])
3787  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3788  ost->sync_ist->file_index,
3789  ost->sync_ist->st->index);
3790  if (ost->stream_copy)
3791  av_log(NULL, AV_LOG_INFO, " (copy)");
3792  else {
3793  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3794  const AVCodec *out_codec = ost->enc;
3795  const char *decoder_name = "?";
3796  const char *in_codec_name = "?";
3797  const char *encoder_name = "?";
3798  const char *out_codec_name = "?";
3799  const AVCodecDescriptor *desc;
3800 
3801  if (in_codec) {
3802  decoder_name = in_codec->name;
3803  desc = avcodec_descriptor_get(in_codec->id);
3804  if (desc)
3805  in_codec_name = desc->name;
3806  if (!strcmp(decoder_name, in_codec_name))
3807  decoder_name = "native";
3808  }
3809 
3810  if (out_codec) {
3811  encoder_name = out_codec->name;
3812  desc = avcodec_descriptor_get(out_codec->id);
3813  if (desc)
3814  out_codec_name = desc->name;
3815  if (!strcmp(encoder_name, out_codec_name))
3816  encoder_name = "native";
3817  }
3818 
3819  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3820  in_codec_name, decoder_name,
3821  out_codec_name, encoder_name);
3822  }
3823  av_log(NULL, AV_LOG_INFO, "\n");
3824  }
3825 
3826  if (ret) {
3827  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3828  return ret;
3829  }
3830 
3832 
3833  return 0;
3834 }
3835 
3836 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3837 static int need_output(void)
3838 {
3839  int i;
3840 
3841  for (i = 0; i < nb_output_streams; i++) {
3843  OutputFile *of = output_files[ost->file_index];
3844  AVFormatContext *os = output_files[ost->file_index]->ctx;
3845 
3846  if (ost->finished ||
3847  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3848  continue;
3849  if (ost->frame_number >= ost->max_frames) {
3850  int j;
3851  for (j = 0; j < of->ctx->nb_streams; j++)
3853  continue;
3854  }
3855 
3856  return 1;
3857  }
3858 
3859  return 0;
3860 }
3861 
3862 /**
3863  * Select the output stream to process.
3864  *
3865  * @return selected output stream, or NULL if none available
3866  */
3868 {
3869  int i;
3870  int64_t opts_min = INT64_MAX;
3871  OutputStream *ost_min = NULL;
3872 
3873  for (i = 0; i < nb_output_streams; i++) {
3875  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3876  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3877  AV_TIME_BASE_Q);
3878  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3880  "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3881  ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3882 
3883  if (!ost->initialized && !ost->inputs_done)
3884  return ost;
3885 
3886  if (!ost->finished && opts < opts_min) {
3887  opts_min = opts;
3888  ost_min = ost->unavailable ? NULL : ost;
3889  }
3890  }
3891  return ost_min;
3892 }
3893 
/* Toggle terminal echo on stdin; no-op on platforms without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;
    if (tcgetattr(0, &tty) == 0) {
        if (on) tty.c_lflag |= ECHO;
        else    tty.c_lflag &= ~ECHO;
        tcsetattr(0, TCSANOW, &tty);
    }
#endif
}
3905 
3906 static int check_keyboard_interaction(int64_t cur_time)
3907 {
3908  int i, ret, key;
3909  static int64_t last_time;
3910  if (received_nb_signals)
3911  return AVERROR_EXIT;
3912  /* read_key() returns 0 on EOF */
3913  if(cur_time - last_time >= 100000 && !run_as_daemon){
3914  key = read_key();
3915  last_time = cur_time;
3916  }else
3917  key = -1;
3918  if (key == 'q')
3919  return AVERROR_EXIT;
3920  if (key == '+') av_log_set_level(av_log_get_level()+10);
3921  if (key == '-') av_log_set_level(av_log_get_level()-10);
3922  if (key == 's') qp_hist ^= 1;
3923  if (key == 'h'){
3924  if (do_hex_dump){
3925  do_hex_dump = do_pkt_dump = 0;
3926  } else if(do_pkt_dump){
3927  do_hex_dump = 1;
3928  } else
3929  do_pkt_dump = 1;
3931  }
3932  if (key == 'c' || key == 'C'){
3933  char buf[4096], target[64], command[256], arg[256] = {0};
3934  double time;
3935  int k, n = 0;
3936  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3937  i = 0;
3938  set_tty_echo(1);
3939  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3940  if (k > 0)
3941  buf[i++] = k;
3942  buf[i] = 0;
3943  set_tty_echo(0);
3944  fprintf(stderr, "\n");
3945  if (k > 0 &&
3946  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3947  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3948  target, time, command, arg);
3949  for (i = 0; i < nb_filtergraphs; i++) {
3950  FilterGraph *fg = filtergraphs[i];
3951  if (fg->graph) {
3952  if (time < 0) {
3953  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3954  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3955  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3956  } else if (key == 'c') {
3957  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3959  } else {
3960  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3961  if (ret < 0)
3962  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3963  }
3964  }
3965  }
3966  } else {
3968  "Parse error, at least 3 arguments were expected, "
3969  "only %d given in string '%s'\n", n, buf);
3970  }
3971  }
3972  if (key == 'd' || key == 'D'){
3973  int debug=0;
3974  if(key == 'D') {
3975  debug = input_streams[0]->st->codec->debug<<1;
3976  if(!debug) debug = 1;
3977  while(debug & (FF_DEBUG_DCT_COEFF
3978 #if FF_API_DEBUG_MV
3979  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3980 #endif
3981  )) //unsupported, would just crash
3982  debug += debug;
3983  }else{
3984  char buf[32];
3985  int k = 0;
3986  i = 0;
3987  set_tty_echo(1);
3988  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3989  if (k > 0)
3990  buf[i++] = k;
3991  buf[i] = 0;
3992  set_tty_echo(0);
3993  fprintf(stderr, "\n");
3994  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3995  fprintf(stderr,"error parsing debug value\n");
3996  }
3997  for(i=0;i<nb_input_streams;i++) {
3998  input_streams[i]->st->codec->debug = debug;
3999  }
4000  for(i=0;i<nb_output_streams;i++) {
4002  ost->enc_ctx->debug = debug;
4003  }
4004  if(debug) av_log_set_level(AV_LOG_DEBUG);
4005  fprintf(stderr,"debug=%d\n", debug);
4006  }
4007  if (key == '?'){
4008  fprintf(stderr, "key function\n"
4009  "? show this help\n"
4010  "+ increase verbosity\n"
4011  "- decrease verbosity\n"
4012  "c Send command to first matching filter supporting it\n"
4013  "C Send/Queue command to all matching filters\n"
4014  "D cycle through available debug modes\n"
4015  "h dump packets/hex press to cycle through the 3 states\n"
4016  "q quit\n"
4017  "s Show QP histogram\n"
4018  );
4019  }
4020  return 0;
4021 }
4022 
4023 #if HAVE_THREADS
4024 static void *input_thread(void *arg)
4025 {
4026  InputFile *f = arg;
4027  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4028  int ret = 0;
4029 
4030  while (1) {
4031  AVPacket pkt;
4032  ret = av_read_frame(f->ctx, &pkt);
4033 
4034  if (ret == AVERROR(EAGAIN)) {
4035  av_usleep(10000);
4036  continue;
4037  }
4038  if (ret < 0) {
4039  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4040  break;
4041  }
4042  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4043  if (flags && ret == AVERROR(EAGAIN)) {
4044  flags = 0;
4045  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4046  av_log(f->ctx, AV_LOG_WARNING,
4047  "Thread message queue blocking; consider raising the "
4048  "thread_queue_size option (current value: %d)\n",
4049  f->thread_queue_size);
4050  }
4051  if (ret < 0) {
4052  if (ret != AVERROR_EOF)
4053  av_log(f->ctx, AV_LOG_ERROR,
4054  "Unable to send packet to main thread: %s\n",
4055  av_err2str(ret));
4056  av_packet_unref(&pkt);
4057  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4058  break;
4059  }
4060  }
4061 
4062  return NULL;
4063 }
4064 
4065 static void free_input_thread(int i)
4066 {
4067  InputFile *f = input_files[i];
4068  AVPacket pkt;
4069 
4070  if (!f || !f->in_thread_queue)
4071  return;
4073  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4074  av_packet_unref(&pkt);
4075 
4076  pthread_join(f->thread, NULL);
4077  f->joined = 1;
4078  av_thread_message_queue_free(&f->in_thread_queue);
4079 }
4080 
4081 static void free_input_threads(void)
4082 {
4083  int i;
4084 
4085  for (i = 0; i < nb_input_files; i++)
4086  free_input_thread(i);
4087 }
4088 
4089 static int init_input_thread(int i)
4090 {
4091  int ret;
4092  InputFile *f = input_files[i];
4093 
4094  if (nb_input_files == 1)
4095  return 0;
4096 
4097  if (f->ctx->pb ? !f->ctx->pb->seekable :
4098  strcmp(f->ctx->iformat->name, "lavfi"))
4099  f->non_blocking = 1;
4100  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4101  f->thread_queue_size, sizeof(AVPacket));
4102  if (ret < 0)
4103  return ret;
4104 
4105  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4106  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4107  av_thread_message_queue_free(&f->in_thread_queue);
4108  return AVERROR(ret);
4109  }
4110 
4111  return 0;
4112 }
4113 
4114 static int init_input_threads(void)
4115 {
4116  int i, ret;
4117 
4118  for (i = 0; i < nb_input_files; i++) {
4119  ret = init_input_thread(i);
4120  if (ret < 0)
4121  return ret;
4122  }
4123  return 0;
4124 }
4125 
4126 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4127 {
4128  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4129  f->non_blocking ?
4131 }
4132 #endif
4133 
4135 {
4136  if (f->rate_emu) {
4137  int i;
4138  for (i = 0; i < f->nb_streams; i++) {
4139  InputStream *ist = input_streams[f->ist_index + i];
4140  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4141  int64_t now = av_gettime_relative() - ist->start;
4142  if (pts > now)
4143  return AVERROR(EAGAIN);
4144  }
4145  }
4146 
4147 #if HAVE_THREADS
4148  if (nb_input_files > 1)
4149  return get_input_packet_mt(f, pkt);
4150 #endif
4151  return av_read_frame(f->ctx, pkt);
4152 }
4153 
4154 static int got_eagain(void)
4155 {
4156  int i;
4157  for (i = 0; i < nb_output_streams; i++)
4158  if (output_streams[i]->unavailable)
4159  return 1;
4160  return 0;
4161 }
4162 
4163 static void reset_eagain(void)
4164 {
4165  int i;
4166  for (i = 0; i < nb_input_files; i++)
4167  input_files[i]->eagain = 0;
4168  for (i = 0; i < nb_output_streams; i++)
4169  output_streams[i]->unavailable = 0;
4170 }
4171 
4172 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4173 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4174  AVRational time_base)
4175 {
4176  int ret;
4177 
4178  if (!*duration) {
4179  *duration = tmp;
4180  return tmp_time_base;
4181  }
4182 
4183  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4184  if (ret < 0) {
4185  *duration = tmp;
4186  return tmp_time_base;
4187  }
4188 
4189  return time_base;
4190 }
4191 
/* Seek the input file back to its beginning (used for -stream_loop) and
 * accumulate the file's total duration into ifile->duration so later packet
 * timestamps can be offset by one full iteration.
 * NOTE(review): this is a doxygen extraction; the signature line
 * "static int seek_to_start(InputFile *ifile, AVFormatContext *is)" (4192)
 * and two statement lines (4222, 4230) were hyperlinks and are missing. */
4193 {
4194  InputStream *ist;
4195  AVCodecContext *avctx;
4196  int i, ret, has_audio = 0;
4197  int64_t duration = 0;
4198 
     /* Rewind the demuxer to the file's start time. */
4199  ret = av_seek_frame(is, -1, is->start_time, 0);
4200  if (ret < 0)
4201  return ret;
4202 
     /* First pass: detect whether any audio stream produced samples. */
4203  for (i = 0; i < ifile->nb_streams; i++) {
4204  ist = input_streams[ifile->ist_index + i];
4205  avctx = ist->dec_ctx;
4206 
4207  /* duration is the length of the last frame in a stream
4208  * when audio stream is present we don't care about
4209  * last video frame length because it's not defined exactly */
4210  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4211  has_audio = 1;
4212  }
4213 
     /* Second pass: estimate the last-frame duration per stream and fold the
      * stream's total span into ifile->duration via duration_max(). */
4214  for (i = 0; i < ifile->nb_streams; i++) {
4215  ist = input_streams[ifile->ist_index + i];
4216  avctx = ist->dec_ctx;
4217 
4218  if (has_audio) {
4219  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4220  AVRational sample_rate = {1, avctx->sample_rate};
4221 
               /* line 4222 missing: presumably rescales ist->nb_samples from
                * sample_rate to ist->st->time_base into 'duration' — TODO confirm
                * against upstream ffmpeg.c */
4223  } else {
4224  continue;
4225  }
4226  } else {
4227  if (ist->framerate.num) {
4228  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4229  } else if (ist->st->avg_frame_rate.num) {
               /* line 4230 missing: presumably the same rescale using
                * av_inv_q(ist->st->avg_frame_rate) — TODO confirm */
4231  } else {
4232  duration = 1;
4233  }
4234  }
4235  if (!ifile->duration)
4236  ifile->time_base = ist->st->time_base;
4237  /* the total duration of the stream, max_pts - min_pts is
4238  * the duration of the stream without the last frame */
4239  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4240  duration += ist->max_pts - ist->min_pts;
4241  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4242  ifile->time_base);
4243  }
4244 
     /* A positive loop count means "loop N times": consume one iteration. */
4245  if (ifile->loop > 0)
4246  ifile->loop--;
4247 
4248  return ret;
4249 }
4250 
4251 /*
4252  * Return
4253  * - 0 -- one packet was read and processed
4254  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4255  * this function should be called again
4256  * - AVERROR_EOF -- this function should not be called again
4257  */
/* Read one packet from the given input file and feed it through decoding /
 * streamcopy, applying timestamp offsetting, wrap correction, discontinuity
 * handling and -stream_loop duration accumulation along the way.
 * NOTE(review): doxygen extraction — lines that were hyperlinks (4261, 4269,
 * 4299, 4322, 4326, 4337, 4356, 4365-4371, 4439, 4441, 4446, 4448, 4451,
 * 4453, 4467, 4469, 4475, 4478, 4482, 4484, 4486, 4490, 4492, 4498, 4500,
 * 4511-4515) are missing, so several statements below are incomplete. */
4258 static int process_input(int file_index)
4259 {
4260  InputFile *ifile = input_files[file_index];
     /* line 4261 missing: presumably "AVFormatContext *is;" — TODO confirm */
4262  InputStream *ist;
4263  AVPacket pkt;
4264  int ret, thread_ret, i, j;
4265  int64_t duration;
4266  int64_t pkt_dts;
4267 
4268  is = ifile->ctx;
     /* line 4269 missing: presumably "ret = get_input_packet(ifile, &pkt);" */
4270 
4271  if (ret == AVERROR(EAGAIN)) {
4272  ifile->eagain = 1;
4273  return ret;
4274  }
     /* On read failure with -stream_loop active: flush the decoders, seek back
      * to the start and retry (input reader thread is torn down and rebuilt
      * around the seek when threading is enabled). */
4275  if (ret < 0 && ifile->loop) {
4276  AVCodecContext *avctx;
4277  for (i = 0; i < ifile->nb_streams; i++) {
4278  ist = input_streams[ifile->ist_index + i];
4279  avctx = ist->dec_ctx;
4280  if (ist->decoding_needed) {
4281  ret = process_input_packet(ist, NULL, 1);
4282  if (ret>0)
4283  return 0;
4284  avcodec_flush_buffers(avctx);
4285  }
4286  }
4287 #if HAVE_THREADS
4288  free_input_thread(file_index);
4289 #endif
4290  ret = seek_to_start(ifile, is);
4291 #if HAVE_THREADS
4292  thread_ret = init_input_thread(file_index);
4293  if (thread_ret < 0)
4294  return thread_ret;
4295 #endif
4296  if (ret < 0)
4297  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4298  else
          /* line 4299 missing: presumably re-reads a packet after the seek */
4300  if (ret == AVERROR(EAGAIN)) {
4301  ifile->eagain = 1;
4302  return ret;
4303  }
4304  }
     /* Unrecoverable read error or EOF: flush decoders, mark non-lavfi
      * outputs finished, and report EOF for this file via EAGAIN. */
4305  if (ret < 0) {
4306  if (ret != AVERROR_EOF) {
4307  print_error(is->url, ret);
4308  if (exit_on_error)
4309  exit_program(1);
4310  }
4311 
4312  for (i = 0; i < ifile->nb_streams; i++) {
4313  ist = input_streams[ifile->ist_index + i];
4314  if (ist->decoding_needed) {
4315  ret = process_input_packet(ist, NULL, 0);
4316  if (ret>0)
4317  return 0;
4318  }
4319 
4320  /* mark all outputs that don't go through lavfi as finished */
4321  for (j = 0; j < nb_output_streams; j++) {
          /* line 4322 missing: presumably "OutputStream *ost = output_streams[j];" */
4323 
4324  if (ost->source_index == ifile->ist_index + i &&
4325  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
               /* line 4326 missing: presumably finish_output_stream(ost); */
4327  }
4328  }
4329 
4330  ifile->eof_reached = 1;
4331  return AVERROR(EAGAIN);
4332  }
4333 
4334  reset_eagain();
4335 
4336  if (do_pkt_dump) {
       /* line 4337 missing: presumably av_pkt_dump_log2(NULL, ..., &pkt, do_hex_dump, */
4338  is->streams[pkt.stream_index]);
4339  }
4340  /* the following test is needed in case new streams appear
4341  dynamically in stream : we ignore them */
4342  if (pkt.stream_index >= ifile->nb_streams) {
4343  report_new_stream(file_index, &pkt);
4344  goto discard_packet;
4345  }
4346 
4347  ist = input_streams[ifile->ist_index + pkt.stream_index];
4348 
     /* Per-stream statistics used by print_report(). */
4349  ist->data_size += pkt.size;
4350  ist->nb_packets++;
4351 
4352  if (ist->discard)
4353  goto discard_packet;
4354 
4355  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
       /* line 4356 missing: presumably the av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, call */
4357  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4358  if (exit_on_error)
4359  exit_program(1);
4360  }
4361 
4362  if (debug_ts) {
4363  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4364  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
       /* lines 4365-4371 missing: the av_ts2str()/av_ts2timestr() argument list */
4372  }
4373 
     /* Timestamp wrap correction for streams with fewer than 64 pts bits. */
4374  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4375  int64_t stime, stime2;
4376  // Correcting starttime based on the enabled streams
4377  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4378  // so we instead do it here as part of discontinuity handling
4379  if ( ist->next_dts == AV_NOPTS_VALUE
4380  && ifile->ts_offset == -is->start_time
4381  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4382  int64_t new_start_time = INT64_MAX;
4383  for (i=0; i<is->nb_streams; i++) {
4384  AVStream *st = is->streams[i];
4385  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4386  continue;
4387  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4388  }
4389  if (new_start_time > is->start_time) {
4390  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4391  ifile->ts_offset = -new_start_time;
4392  }
4393  }
4394 
4395  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4396  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4397  ist->wrap_correction_done = 1;
4398 
4399  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4400  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4401  ist->wrap_correction_done = 0;
4402  }
4403  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4404  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4405  ist->wrap_correction_done = 0;
4406  }
4407  }
4408 
4409  /* add the stream-global side data to the first packet */
4410  if (ist->nb_packets == 1) {
4411  for (i = 0; i < ist->st->nb_side_data; i++) {
4412  AVPacketSideData *src_sd = &ist->st->side_data[i];
4413  uint8_t *dst_data;
4414 
4415  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4416  continue;
4417 
4418  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4419  continue;
4420 
4421  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4422  if (!dst_data)
4423  exit_program(1);
4424 
4425  memcpy(dst_data, src_sd->data, src_sd->size);
4426  }
4427  }
4428 
     /* Apply the per-file timestamp offset and per-stream -itsscale factor. */
4429  if (pkt.dts != AV_NOPTS_VALUE)
4430  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4431  if (pkt.pts != AV_NOPTS_VALUE)
4432  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4433 
4434  if (pkt.pts != AV_NOPTS_VALUE)
4435  pkt.pts *= ist->ts_scale;
4436  if (pkt.dts != AV_NOPTS_VALUE)
4437  pkt.dts *= ist->ts_scale;
4438 
     /* line 4439 missing: presumably rescales pkt.dts into pkt_dts (AV_TIME_BASE_Q) */
     /* Inter-stream discontinuity handling (before the first decoded dts). */
4440  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
       /* line 4441 missing: presumably the AVMEDIA_TYPE_AUDIO half of the condition */
4442  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4443  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4444  int64_t delta = pkt_dts - ifile->last_ts;
4445  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
       /* line 4446 missing: presumably the symmetric "delta > threshold" half */
4447  ifile->ts_offset -= delta;
       /* line 4448 missing: presumably the av_log(NULL, AV_LOG_DEBUG, call */
4449  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4450  delta, ifile->ts_offset);
       /* line 4451 missing: presumably subtracts the rescaled delta from pkt.dts */
4452  if (pkt.pts != AV_NOPTS_VALUE)
       /* lines 4453-4454: presumably the matching pkt.pts adjustment */
4454  }
4455  }
4456 
     /* -stream_loop: shift timestamps by the accumulated file duration. */
4457  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4458  if (pkt.pts != AV_NOPTS_VALUE) {
4459  pkt.pts += duration;
4460  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4461  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4462  }
4463 
4464  if (pkt.dts != AV_NOPTS_VALUE)
4465  pkt.dts += duration;
4466 
     /* line 4467 missing: presumably refreshes pkt_dts after the shifts above */
     /* Per-stream discontinuity / invalid-timestamp handling. */
4468  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
       /* line 4469 missing: presumably the AVMEDIA_TYPE_AUDIO half of the condition */
4470  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4471  !copy_ts) {
4472  int64_t delta = pkt_dts - ist->next_dts;
4473  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4474  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
          /* line 4475 missing: presumably "delta > threshold ||" */
4476  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4477  ifile->ts_offset -= delta;
          /* line 4478 missing: presumably the av_log(NULL, AV_LOG_DEBUG, call */
4479  "timestamp discontinuity for stream #%d:%d "
4480  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4481  ist->file_index, ist->st->index, ist->st->id,
          /* line 4482 missing: presumably av_get_media_type_string(...), */
4483  delta, ifile->ts_offset);
          /* line 4484 missing: presumably the pkt.dts correction */
4485  if (pkt.pts != AV_NOPTS_VALUE)
          /* line 4486 missing: presumably the pkt.pts correction */
4487  }
4488  } else {
4489  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
          /* line 4490 missing: presumably "delta > dts_error_threshold*AV_TIME_BASE) {" */
4491  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
          /* line 4492 missing: presumably "pkt.dts = AV_NOPTS_VALUE;" */
4493  }
4494  if (pkt.pts != AV_NOPTS_VALUE){
4495  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4496  delta = pkt_pts - ist->next_dts;
4497  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
            /* line 4498 missing: presumably the symmetric upper-bound check */
4499  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
            /* line 4500 missing: presumably "pkt.pts = AV_NOPTS_VALUE;" */
4501  }
4502  }
4503  }
4504  }
4505 
4506  if (pkt.dts != AV_NOPTS_VALUE)
4507  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4508 
4509  if (debug_ts) {
4510  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
       /* lines 4511-4515 missing: the av_ts2str()/av_ts2timestr() argument list */
4516  }
4517 
4518  sub2video_heartbeat(ist, pkt.pts);
4519 
4520  process_input_packet(ist, &pkt, 0);
4521 
4522 discard_packet:
4523  av_packet_unref(&pkt);
4524 
4525  return 0;
4526 }
4527 
4528 /**
4529  * Perform a step of transcoding for the specified filter graph.
4530  *
4531  * @param[in] graph filter graph to consider
4532  * @param[out] best_ist input stream where a frame would allow to continue
4533  * @return 0 for success, <0 for error
4534  */
/* NOTE(review): doxygen extraction — link lines 4543 and 4560 are missing,
 * so two statements below are incomplete. Code kept byte-identical. */
4535 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4536 {
4537  int i, ret;
4538  int nb_requests, nb_requests_max = 0;
4539  InputFilter *ifilter;
4540  InputStream *ist;
4541 
4542  *best_ist = NULL;
     /* line 4543 missing: presumably "ret = avfilter_graph_request_oldest(graph->graph);" */
4544  if (ret >= 0)
4545  return reap_filters(0);
4546 
     /* Graph reached EOF: drain it and close all of its output streams. */
4547  if (ret == AVERROR_EOF) {
4548  ret = reap_filters(1);
4549  for (i = 0; i < graph->nb_outputs; i++)
4550  close_output_stream(graph->outputs[i]->ost);
4551  return ret;
4552  }
4553  if (ret != AVERROR(EAGAIN))
4554  return ret;
4555 
     /* The graph is starved: pick the input with the most failed buffersrc
      * requests as the best stream to feed next. */
4556  for (i = 0; i < graph->nb_inputs; i++) {
4557  ifilter = graph->inputs[i];
4558  ist = ifilter->ist;
4559  if (input_files[ist->file_index]->eagain ||
       /* line 4560 missing: presumably "input_files[ist->file_index]->eof_reached)" */
4561  continue;
4562  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4563  if (nb_requests > nb_requests_max) {
4564  nb_requests_max = nb_requests;
4565  *best_ist = ist;
4566  }
4567  }
4568 
     /* No feedable input: mark every output of this graph unavailable so
      * choose_output() skips them this round. */
4569  if (!*best_ist)
4570  for (i = 0; i < graph->nb_outputs; i++)
4571  graph->outputs[i]->ost->unavailable = 1;
4572 
4573  return 0;
4574 }
4575 
4576 /**
4577  * Run a single step of transcoding.
4578  *
4579  * @return 0 for success, <0 for error
4580  */
4581 static int transcode_step(void)
4582 {
4583  OutputStream *ost;
4584  InputStream *ist = NULL;
4585  int ret;
4586 
4587  ost = choose_output();
4588  if (!ost) {
4589  if (got_eagain()) {
4590  reset_eagain();
4591  av_usleep(10000);
4592  return 0;
4593  }
4594  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4595  return AVERROR_EOF;
4596  }
4597 
4598  if (ost->filter && !ost->filter->graph->graph) {
4599  if (ifilter_has_all_input_formats(ost->filter->graph)) {
4600  ret = configure_filtergraph(ost->filter->graph);
4601  if (ret < 0) {
4602  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4603  return ret;
4604  }
4605  }
4606  }
4607 
4608  if (ost->filter && ost->filter->graph->graph) {
4609  if (!ost->initialized) {
4610  char error[1024] = {0};
4611  ret = init_output_stream(ost, error, sizeof(error));
4612  if (ret < 0) {
4613  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4614  ost->file_index, ost->index, error);
4615  exit_program(1);
4616  }
4617  }
4618  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4619  return ret;
4620  if (!ist)
4621  return 0;
4622  } else if (ost->filter) {
4623  int i;
4624  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4625  InputFilter *ifilter = ost->filter->graph->inputs[i];
4626  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4627  ist = ifilter->ist;
4628  break;
4629  }
4630  }
4631  if (!ist) {
4632  ost->inputs_done = 1;
4633  return 0;
4634  }
4635  } else {
4636  av_assert0(ost->source_index >= 0);
4637  ist = input_streams[ost->source_index];
4638  }
4639 
4640  ret = process_input(ist->file_index);
4641  if (ret == AVERROR(EAGAIN)) {
4642  if (input_files[ist->file_index]->eagain)
4643  ost->unavailable = 1;
4644  return 0;
4645  }
4646 
4647  if (ret < 0)
4648  return ret == AVERROR_EOF ? 0 : ret;
4649 
4650  return reap_filters(0);
4651 }
4652 
/*
 * The following code is the main loop of the file converter
 */
/* NOTE(review): doxygen extraction — link lines 4722, 4762-4763, 4779 and
 * 4786 are missing, so a few statements below are incomplete. */
static int transcode(void)
{
 int ret, i;
 AVFormatContext *os;
 OutputStream *ost;
 InputStream *ist;
 int64_t timer_start;
 int64_t total_packets_written = 0;

 ret = transcode_init();
 if (ret < 0)
 goto fail;

 if (stdin_interaction) {
 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
 }

 timer_start = av_gettime_relative();

#if HAVE_THREADS
 if ((ret = init_input_threads()) < 0)
 goto fail;
#endif

 /* Main loop: one transcode_step() per iteration until a signal, a 'q'
  * keypress, an error, or no output stream needs more data. */
 while (!received_sigterm) {
 int64_t cur_time= av_gettime_relative();

 /* if 'q' pressed, exits */
 if (stdin_interaction)
 if (check_keyboard_interaction(cur_time) < 0)
 break;

 /* check if there's any stream where output is still needed */
 if (!need_output()) {
 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
 break;
 }

 ret = transcode_step();
 if (ret < 0 && ret != AVERROR_EOF) {
 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 break;
 }

 /* dump report by using the output first video and audio streams */
 print_report(0, timer_start, cur_time);
 }
#if HAVE_THREADS
 free_input_threads();
#endif

 /* at the end of stream, we must flush the decoder buffers */
 for (i = 0; i < nb_input_streams; i++) {
 ist = input_streams[i];
 if (!input_files[ist->file_index]->eof_reached) {
 process_input_packet(ist, NULL, 0);
 }
 }
 flush_encoders();

 term_exit();

 /* write the trailer if needed and close file */
 for (i = 0; i < nb_output_files; i++) {
 os = output_files[i]->ctx;
 if (!output_files[i]->header_written) {
   /* line 4722 missing: presumably the av_log(NULL, AV_LOG_ERROR, call */
 "Nothing was written into output file %d (%s), because "
 "at least one of its streams received no packets.\n",
 i, os->url);
 continue;
 }
 if ((ret = av_write_trailer(os)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
 if (exit_on_error)
 exit_program(1);
 }
 }

 /* dump report by using the first video and audio streams */
 print_report(1, timer_start, av_gettime_relative());

 /* close each encoder */
 for (i = 0; i < nb_output_streams; i++) {
 ost = output_streams[i];
 if (ost->encoding_needed) {
 av_freep(&ost->enc_ctx->stats_in);
 }
 total_packets_written += ost->packets_written;
 }

 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
 exit_program(1);
 }

 /* close each decoder */
 for (i = 0; i < nb_input_streams; i++) {
 ist = input_streams[i];
 if (ist->decoding_needed) {
 avcodec_close(ist->dec_ctx);
 if (ist->hwaccel_uninit)
 ist->hwaccel_uninit(ist->dec_ctx);
 }
 }

 /* lines 4762-4763 missing: presumably hw-device teardown — TODO confirm */

 /* finished ! */
 ret = 0;

 fail:
#if HAVE_THREADS
 free_input_threads();
#endif

 if (output_streams) {
 for (i = 0; i < nb_output_streams; i++) {
 ost = output_streams[i];
 if (ost) {
 if (ost->logfile) {
 if (fclose(ost->logfile))
     /* line 4779 missing: presumably the av_log(NULL, AV_LOG_ERROR, call */
 "Error closing logfile, loss of information possible: %s\n",
 av_err2str(AVERROR(errno)));
 ost->logfile = NULL;
 }
 av_freep(&ost->forced_kf_pts);
 av_freep(&ost->apad);
   /* line 4786 missing: presumably another av_freep() on an ost field */
 av_dict_free(&ost->encoder_opts);
 av_dict_free(&ost->sws_dict);
 av_dict_free(&ost->swr_opts);
 av_dict_free(&ost->resample_opts);
 }
 }
 }
 return ret;
}
4796 
/* Sample wall-clock, user CPU and system CPU time for -benchmark reporting,
 * all in microseconds. Falls back to zeros where the platform offers no API.
 * NOTE(review): the signature line
 * "static BenchmarkTimeStamps get_benchmark_time_stamps(void)" (4797) was a
 * doxygen hyperlink and is missing from this extraction. */
4798 {
4799  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4800 #if HAVE_GETRUSAGE
4801  struct rusage rusage;
4802 
     /* POSIX: ru_utime / ru_stime are struct timeval (seconds + microseconds). */
4803  getrusage(RUSAGE_SELF, &rusage);
4804  time_stamps.user_usec =
4805  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4806  time_stamps.sys_usec =
4807  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4808 #elif HAVE_GETPROCESSTIMES
4809  HANDLE proc;
4810  FILETIME c, e, k, u;
4811  proc = GetCurrentProcess();
4812  GetProcessTimes(proc, &c, &e, &k, &u);
     /* FILETIME counts 100-nanosecond intervals; divide by 10 for microseconds. */
4813  time_stamps.user_usec =
4814  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4815  time_stamps.sys_usec =
4816  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4817 #else
4818  time_stamps.user_usec = time_stamps.sys_usec = 0;
4819 #endif
4820  return time_stamps;
4821 }
4822 
/* Return this process's peak memory usage in bytes, or 0 when the
 * platform provides no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    /* POSIX: ru_maxrss is reported in kilobytes. */
    struct rusage ru;
    getrusage(RUSAGE_SELF, &ru);
    return (int64_t)ru.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    /* Windows: peak pagefile usage of the current process. */
    HANDLE self;
    PROCESS_MEMORY_COUNTERS pmc;
    self = GetCurrentProcess();
    pmc.cb = sizeof(pmc);
    GetProcessMemoryInfo(self, &pmc, sizeof(pmc));
    return pmc.PeakPagefileUsage;
#else
    return 0;
#endif
}
4840 
/* Log callback that discards all messages (all parameters intentionally
 * unused); matches the av_log_set_callback() signature. */
4841 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4842 {
4843 }
4844 
/* Program entry point: parse options, open inputs/outputs, run transcode(),
 * then print benchmark/statistics and exit.
 * NOTE(review): doxygen extraction — link lines 4848, 4852, 4856, 4861,
 * 4867, 4869, 4895, 4900, 4904, 4909-4910, 4913 are missing, so several
 * statements below are incomplete. */
4845 int main(int argc, char **argv)
4846 {
4847  int i, ret;
     /* line 4848 missing: presumably "BenchmarkTimeStamps ti;" */
4849 
4850  init_dynload();
4851 
     /* line 4852 missing: presumably registers the SIGTERM/cleanup handler */
4853 
4854  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4855 
     /* line 4856 missing: presumably av_log_set_flags(AV_LOG_SKIP_REPEATED); */
4857  parse_loglevel(argc, argv, options);
4858 
4859  if(argc>1 && !strcmp(argv[1], "-d")){
4860  run_as_daemon=1;
       /* line 4861 missing: presumably sets the null log callback for daemon mode */
4862  argc--;
4863  argv++;
4864  }
4865 
4866 #if CONFIG_AVDEVICE
     /* line 4867 missing: presumably avdevice_register_all(); */
4868 #endif
     /* line 4869 missing: presumably avformat_network_init(); */
4870 
4871  show_banner(argc, argv, options);
4872 
4873  /* parse options and open all input/output files */
4874  ret = ffmpeg_parse_options(argc, argv);
4875  if (ret < 0)
4876  exit_program(1);
4877 
4878  if (nb_output_files <= 0 && nb_input_files == 0) {
4879  show_usage();
4880  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4881  exit_program(1);
4882  }
4883 
4884  /* file converter / grab */
4885  if (nb_output_files <= 0) {
4886  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4887  exit_program(1);
4888  }
4889 
     /* Only emit an SDP when every output muxer is RTP. */
4890  for (i = 0; i < nb_output_files; i++) {
4891  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4892  want_sdp = 0;
4893  }
4894 
     /* line 4895 missing: presumably records the pre-transcode benchmark timestamps */
4896  if (transcode() < 0)
4897  exit_program(1);
4898  if (do_benchmark) {
4899  int64_t utime, stime, rtime;
       /* line 4900 missing: presumably current_time = get_benchmark_time_stamps(); */
4901  utime = current_time.user_usec - ti.user_usec;
4902  stime = current_time.sys_usec - ti.sys_usec;
4903  rtime = current_time.real_usec - ti.real_usec;
       /* line 4904 missing: presumably the av_log(NULL, AV_LOG_INFO, call */
4905  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4906  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4907  }
4908  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
     /* lines 4909-4910 missing: the decode counters and the "abort on decode
      * error" condition guarding the exit below — TODO confirm */
4911  exit_program(69);
4912 
     /* line 4913 missing: presumably exit_program(received_nb_signals ? 255 : main_return_code); */
4914  return main_return_code;
4915 }
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
AVSubtitle
Definition: avcodec.h:3933
print_sdp
static void print_sdp(void)
Definition: ffmpeg.c:2753
avcodec_close
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1117
avcodec_encode_subtitle
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:342
AVCodecContext::frame_size
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2245
InputFilter::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg.h:248
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:599
AVCodec
AVCodec.
Definition: avcodec.h:3481
pthread_join
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:90
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
av_codec_get_id
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
ifilter_parameters_from_codecpar
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1844
need_output
static int need_output(void)
Definition: ffmpeg.c:3837
audio_sync_method
int audio_sync_method
Definition: ffmpeg_opt.c:92
check_output_constraints
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1976
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
psnr
static double psnr(double d)
Definition: ffmpeg.c:1355
AVERROR_EXPERIMENTAL
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
Definition: error.h:72
AV_FIELD_PROGRESSIVE
@ AV_FIELD_PROGRESSIVE
Definition: avcodec.h:1545
level
uint8_t level
Definition: svq3.c:207
InputStream::prev_sub
struct InputStream::@24 prev_sub
AV_CODEC_ID_AC3
@ AV_CODEC_ID_AC3
Definition: avcodec.h:567
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:159
FKF_PREV_FORCED_T
@ FKF_PREV_FORCED_T
Definition: ffmpeg.h:428
atomic_store
#define atomic_store(object, desired)
Definition: stdatomic.h:85
reset_eagain
static void reset_eagain(void)
Definition: ffmpeg.c:4163
InputStream::hwaccel_device
char * hwaccel_device
Definition: ffmpeg.h:366
avcodec_receive_packet
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:417
seek_to_start
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4192
AVOutputFormat::name
const char * name
Definition: avformat.h:496
VSYNC_PASSTHROUGH
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
r
const char * r
Definition: vf_curves.c:114
AVERROR
#define AVERROR(e) — turn a positive POSIX error code into a negative FFmpeg error code. Definition: error.h (the original cross-reference text here was garbled by extraction, fusing in unrelated libavfilter format-negotiation documentation).
nb_input_files
int nb_input_files
Definition: ffmpeg.c:150
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:3938
opt.h
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1005
ffmpeg_exited
static volatile int ffmpeg_exited
Definition: ffmpeg.c:337
AVCodecContext::get_format
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1817
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3953
forced_keyframes_const_names
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
LIBAVCODEC_IDENT
#define LIBAVCODEC_IDENT
Definition: version.h:42
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2276
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
libm.h
video_sync_method
int video_sync_method
Definition: ffmpeg_opt.c:93
InputFilter::width
int width
Definition: ffmpeg.h:247
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1269
av_fifo_generic_write
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
AVCodecHWConfig::methods
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3464
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:324
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:2225
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
FKF_PREV_FORCED_N
@ FKF_PREV_FORCED_N
Definition: ffmpeg.h:427
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
FilterGraph::graph_desc
const char * graph_desc
Definition: ffmpeg.h:284
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1571
AVCodecContext::thread_safe_callbacks
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2853
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3949
n
int n
Definition: avisynth_c.h:760
InputStream::data_size
uint64_t data_size
Definition: ffmpeg.h:380
u
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:252
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
thread.h
AV_RL64
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2522
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVFMT_VARIABLE_FPS
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:470
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:89
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:289
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:838
AV_FRAME_DATA_A53_CC
@ AV_FRAME_DATA_A53_CC
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:925
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
sub2video_heartbeat
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:274
InputFile::nb_streams_warn
int nb_streams_warn
Definition: ffmpeg.h:411
avcodec_parameters_from_context
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2098
remove_avoptions
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:637
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:94
InputStream::dec_ctx
AVCodecContext * dec_ctx
Definition: ffmpeg.h:304
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:467
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
ch
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
transcode_step
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4581
BenchmarkTimeStamps::user_usec
int64_t user_usec
Definition: ffmpeg.c:125
AVSubtitleRect
Definition: avcodec.h:3898
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:3937
AV_DISPOSITION_DEFAULT
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:815
av_bsf_send_packet
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:186
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: avcodec.h:230
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
ffmpeg_parse_options
int ffmpeg_parse_options(int argc, char **argv)
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:211
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:295
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:26
AV_THREAD_MESSAGE_NONBLOCK
@ AV_THREAD_MESSAGE_NONBLOCK
Perform non-blocking operation.
Definition: threadmessage.h:31
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1410
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:388
AVFrame::width
int width
Definition: frame.h:353
OutputStream::unavailable
int unavailable
Definition: ffmpeg.h:513
AVPacketSideData
Definition: avcodec.h:1420
AVCodec::capabilities
int capabilities
Codec capabilities.
Definition: avcodec.h:3500
w
uint8_t w
Definition: llviddspenc.c:38
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1070
FKF_T
@ FKF_T
Definition: ffmpeg.h:429
AVPacket::data
uint8_t * data
Definition: avcodec.h:1477
current_time
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
AVComponentDescriptor::depth
int depth
Number of bits in the component.
Definition: pixdesc.h:58
finish_output_stream
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1405
AVCodecContext::field_order
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2222
AVFrame::top_field_first
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:447
AVOption
AVOption.
Definition: opt.h:246
ATOMIC_VAR_INIT
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:150
FilterGraph::index
int index
Definition: ffmpeg.h:283
AVStream::avg_frame_rate
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:943
VSYNC_VSCFR
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
AVStream::cur_dts
int64_t cur_dts
Definition: avformat.h:1073
InputStream::nb_filters
int nb_filters
Definition: ffmpeg.h:359
atomic_int
intptr_t atomic_int
Definition: stdatomic.h:55
AVFrame::pkt_duration
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown.
Definition: frame.h:574
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
AVCodecContext::subtitle_header
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3050
transcode
static int transcode(void)
Definition: ffmpeg.c:4656
VSYNC_AUTO
#define VSYNC_AUTO
Definition: ffmpeg.h:49
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
BenchmarkTimeStamps::sys_usec
int64_t sys_usec
Definition: ffmpeg.c:126
HANDLE
PVOID HANDLE
Definition: basicDataTypeConversions.h:21
progress_avio
AVIOContext * progress_avio
Definition: ffmpeg.c:143
show_usage
void show_usage(void)
Definition: ffmpeg_opt.c:3242
do_audio_out
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:911
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:290
av_display_rotation_set
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1495
AVCodecParameters::codec_tag
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:3961
max
#define max(a, b)
Definition: cuda_runtime.h:33
mathematics.h
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVDictionary
Definition: dict.c:30
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:532
AV_CODEC_FLAG_PSNR
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:887
OutputFilter::sample_rates
int * sample_rates
Definition: ffmpeg.h:279
check_recording_time
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:898
VSYNC_CFR
#define VSYNC_CFR
Definition: ffmpeg.h:51
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1785
decode_audio
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2283
av_fifo_generic_read
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
InputStream::hwaccel_get_buffer
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
InputStream::decoding_needed
int decoding_needed
Definition: ffmpeg.h:300
av_buffersink_get_hw_frames_ctx
AVBufferRef * av_buffersink_get_hw_frames_ctx(const AVFilterContext *ctx)
AV_CODEC_FLAG_INTERLACED_ME
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:917
flush_encoders
static void flush_encoders(void)
Definition: ffmpeg.c:1857
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
ost
static AVStream * ost
Definition: vaapi_transcode.c:45
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
os_support.h
AV_PKT_FLAG_KEY
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1509
sample_rate
sample_rate
Definition: ffmpeg_filter.c:191
get_input_packet
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4134
qp_hist
int qp_hist
Definition: ffmpeg_opt.c:107
term_exit_sigsafe
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:320
AVBSFContext
The bitstream filter state.
Definition: avcodec.h:5763
ECHO
#define ECHO(name, type, min, max)
Definition: af_aecho.c:186
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:309
want_sdp
static int want_sdp
Definition: ffmpeg.c:140
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
AVIOInterruptCB
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
InputFilter::channel_layout
uint64_t channel_layout
Definition: ffmpeg.h:252
InputFilter::ist
struct InputStream * ist
Definition: ffmpeg.h:237
do_video_out
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1054
av_expr_parse
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
InputFile::eof_reached
int eof_reached
Definition: ffmpeg.h:395
exit_program
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:136
InputStream
Definition: ffmpeg.h:295
AVCodecContext::framerate
AVRational framerate
Definition: avcodec.h:3105
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:103
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4452
got_eagain
static int got_eagain(void)
Definition: ffmpeg.c:4154
ifilter_send_eof
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2207
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1620
nb_frames_drop
static int nb_frames_drop
Definition: ffmpeg.c:137
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
fmt
const char * fmt
Definition: avisynth_c.h:861
av_buffersink_set_frame_size
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
dts_delta_threshold
float dts_delta_threshold
Definition: ffmpeg_opt.c:88
AVCodecParameters::channels
int channels
Audio only.
Definition: avcodec.h:4063
fifo.h
avio_open2
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1179
finish
static void finish(void)
Definition: movenc.c:345
vstats_version
int vstats_version
Definition: ffmpeg_opt.c:113
hwaccels
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
AVCodecContext::codec
const struct AVCodec * codec
Definition: avcodec.h:1574
InputStream::sub2video
struct InputStream::sub2video sub2video
fail
#define fail()
Definition: checkasm.h:120
InputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:240
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
InputStream::decoder_opts
AVDictionary * decoder_opts
Definition: ffmpeg.h:332
AVProgram::discard
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1267
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
check_init_output_file
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2972
InputStream::filter_in_rescale_delta_last
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:319
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVFrame::key_frame
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:373
AVCodecContext::flags
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1645
AVChapter
Definition: avformat.h:1299
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
InputStream::nb_packets
uint64_t nb_packets
Definition: ffmpeg.h:382
AV_DISPOSITION_FORCED
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:827
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
pts
static int64_t pts
Definition: transcode_aac.c:647
av_thread_message_queue_recv
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
Definition: threadmessage.c:172
hw_device_setup_for_decode
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:301
InputFilter::frame_queue
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:242
us
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:266
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1252
avcodec_copy_context
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:215
AV_CODEC_ID_MP3
@ AV_CODEC_ID_MP3
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:565
AVStream::duration
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:919
FFMIN3
#define FFMIN3(a, b, c)
Definition: common.h:97
av_codec_get_tag2
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
OutputFile::opts
AVDictionary * opts
Definition: ffmpeg.h:556
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
InputStream::sub2video::last_pts
int64_t last_pts
Definition: ffmpeg.h:347
loop
static int loop
Definition: ffplay.c:340
do_pkt_dump
int do_pkt_dump
Definition: ffmpeg_opt.c:99
av_expr_free
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
AVRational::num
int num
Numerator.
Definition: rational.h:59
src
#define src
Definition: vp8dsp.c:254
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5021
InputFile
Definition: ffmpeg.h:393
AV_CODEC_FLAG_INTERLACED_DCT
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:896
init_output_stream_streamcopy
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3054
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1098
AV_DICT_DONT_STRDUP_VAL
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:74
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
AV_CODEC_ID_DVB_SUBTITLE
@ AV_CODEC_ID_DVB_SUBTITLE
Definition: avcodec.h:659
AVCodecContext::get_buffer2
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2380
AV_DISPOSITION_CLEAN_EFFECTS
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:830
ffmpeg_cleanup
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:483
InputStream::hwaccel_pix_fmt
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
OutputFile::shortest
int shortest
Definition: ffmpeg.h:562
avassert.h
InputStream::dts
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:313
lrint
#define lrint
Definition: tablegen.h:53
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
av_fifo_space
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
buf
void * buf
Definition: avisynth_c.h:766
AV_PKT_FLAG_CORRUPT
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1510
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:571
av_thread_message_queue_send
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
Definition: threadmessage.c:156
choose_output
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3867
AVStream::first_dts
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1072
BenchmarkTimeStamps::real_usec
int64_t real_usec
Definition: ffmpeg.c:124
media_type_string
#define media_type_string
Definition: cmdutils.h:617
duration
int64_t duration
Definition: movenc.c:63
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
av_stream_new_side_data
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5522
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
av_opt_set_dict
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1603
HWACCEL_GENERIC
@ HWACCEL_GENERIC
Definition: ffmpeg.h:61
AVCodecContext::has_b_frames
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1855
init_output_stream
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3454
input_streams
InputStream ** input_streams
Definition: ffmpeg.c:147
FFMAX3
#define FFMAX3(a, b, c)
Definition: common.h:95
AVCodecDescriptor
This struct describes the properties of a single codec described by an AVCodecID.
Definition: avcodec.h:716
InputStream::cfr_next_pts
int64_t cfr_next_pts
Definition: ffmpeg.h:326
intreadwrite.h
s
#define s(width, name)
Definition: cbs_vp9.c:257
AVCodecContext::global_quality
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1631
get_benchmark_time_stamps
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4797
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:291
vstats_filename
char * vstats_filename
Definition: ffmpeg_opt.c:84
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2119
close_output_stream
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:825
av_seek_frame
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2525
InputStream::framerate
AVRational framerate
Definition: ffmpeg.h:333
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:198
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4033
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:601
sub2video_update
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:240
AVFormatContext::chapters
AVChapter ** chapters
Definition: avformat.h:1572
AVDictionaryEntry::key
char * key
Definition: dict.h:82
ENCODER_FINISHED
@ ENCODER_FINISHED
Definition: ffmpeg.h:438
frame_size
int frame_size
Definition: mxfenc.c:2215
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AV_CODEC_ID_VP9
@ AV_CODEC_ID_VP9
Definition: avcodec.h:386
AVCodecParameters::width
int width
Video only.
Definition: avcodec.h:4023
AVCodecContext::ticks_per_frame
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1697
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
InputFilter
Definition: ffmpeg.h:235
get_input_stream
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2959
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
term_init
void term_init(void)
Definition: ffmpeg.c:387
do_streamcopy
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1993
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AVIO_FLAG_WRITE
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:655
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:261
AVCodecContext::bits_per_raw_sample
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2796
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
AVPacketSideData::data
uint8_t * data
Definition: avcodec.h:1421
MUXER_FINISHED
@ MUXER_FINISHED
Definition: ffmpeg.h:439
ctx
AVFormatContext * ctx
Definition: movenc.c:48
InputStream::sub2video::sub_queue
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:349
abort_codec_experimental
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:655
InputStream::filters
InputFilter ** filters
Definition: ffmpeg.h:358
limits.h
av_expr_eval
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
av_frame_clone
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
max_error_rate
float max_error_rate
Definition: ffmpeg_opt.c:110
AVCodecHWConfig::pix_fmt
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3459
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3939
pix_fmt
static enum AVPixelFormat pix_fmt
Definition: demuxing_decoding.c:40
OutputFile::header_written
int header_written
Definition: ffmpeg.h:564
on
s EdgeDetect Foobar g libavfilter vf_edgedetect c libavfilter vf_foobar c edit libavfilter and add an entry for foobar following the pattern of the other filters edit libavfilter allfilters and add an entry for foobar following the pattern of the other filters configure make j< whatever > ffmpeg ffmpeg i you should get a foobar png with Lena edge detected That s your new playground is ready Some little details about what s going on
Definition: writing_filters.txt:34
term_exit
void term_exit(void)
Definition: ffmpeg.c:328
AVOutputFormat::codec_tag
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:521
av_hwdevice_get_type_name
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
compare_int64
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2966
InputStream::hwaccel_retrieve_data
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1183
av_bsf_free
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
AV_CODEC_ID_CODEC2
@ AV_CODEC_ID_CODEC2
Definition: avcodec.h:631
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:238
AVCodecContext::error
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2748
key
const char * key
Definition: hwcontext_opencl.c:168
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
AVMEDIA_TYPE_DATA
@ AVMEDIA_TYPE_DATA
Opaque data information usually continuous.
Definition: avutil.h:203
atomic_load
#define atomic_load(object)
Definition: stdatomic.h:93
av_fifo_realloc2
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
assert_avoptions
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:646
AV_CODEC_ID_H264
@ AV_CODEC_ID_H264
Definition: avcodec.h:245
av_packet_new_side_data
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
process_input_packet
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2571
av_rescale_delta
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:869
process_input
static int process_input(int file_index)
Definition: ffmpeg.c:4258
avformat_write_header
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:508
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:1575
int32_t
int32_t
Definition: audio_convert.c:194
av_packet_get_side_data
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1608
arg
const char * arg
Definition: jacosubdec.c:66
pthread_create
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:76
AVCodecDescriptor::props
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:732
FFABS
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
AVCodecParserContext::repeat_pict
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:5126
output_streams
OutputStream ** output_streams
Definition: ffmpeg.c:152
transcode_from_filter
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4535
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:811
InputStream::pts
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:316
AVFormatContext
Format I/O context.
Definition: avformat.h:1342
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:380
av_realloc_f
#define av_realloc_f(p, o, n)
Definition: tableprint_vlc.h:33
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
opts
AVDictionary * opts
Definition: movenc.c:50
AVPacket::buf
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1460
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1017
run_as_daemon
static int run_as_daemon
Definition: ffmpeg.c:134
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
print_final_stats
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1528
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:67
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:263
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:899
flush
static void flush(AVCodecContext *avctx)
Definition: aacdec_template.c:500
NULL
#define NULL
Definition: coverity.c:32
InputStream::sub2video::w
int w
Definition: ffmpeg.h:351
AVERROR_PATCHWELCOME
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
HWAccel::id
enum HWAccelID id
Definition: ffmpeg.h:70
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:125
InputStream::top_field_first
int top_field_first
Definition: ffmpeg.h:334
InputStream::st
AVStream * st
Definition: ffmpeg.h:297
avcodec_parameters_free
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2069
main
int main(int argc, char **argv)
Definition: ffmpeg.c:4845
update_benchmark
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:660
HWAccel
Definition: ffmpeg.h:67
AVCodec::type
enum AVMediaType type
Definition: avcodec.h:3494
send_frame_to_filters
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2257
decode_video
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2345
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:171
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
getmaxrss
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4823
InputStream::next_pts
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
frame_bits_per_raw_sample
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:109
AVPacketSideData::type
enum AVPacketSideDataType type
Definition: avcodec.h:1423
AV_DISPOSITION_COMMENT
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:818
check_keyboard_interaction
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3906
av_default_item_name
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
AV_PICTURE_TYPE_I
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
AVCodecContext::subtitle_header_size
int subtitle_header_size
Definition: avcodec.h:3051
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1384
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:390
AV_CODEC_PROP_BITMAP_SUB
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:775
parseutils.h
InputStream::hwaccel_id
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
InputFilter::channels
int channels
Definition: ffmpeg.h:251
mathops.h
duration_max
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4173
InputStream::dec
AVCodec * dec
Definition: ffmpeg.h:305
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1268
main_return_code
static int main_return_code
Definition: ffmpeg.c:338
vstats_file
static FILE * vstats_file
Definition: ffmpeg.c:112
AVStream::metadata
AVDictionary * metadata
Definition: avformat.h:934
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
InputFilter::eof
int eof
Definition: ffmpeg.h:256
InputStream::fix_sub_duration
int fix_sub_duration
Definition: ffmpeg.h:339
AV_DISPOSITION_METADATA
#define AV_DISPOSITION_METADATA
Definition: avformat.h:852
AV_CODEC_CAP_VARIABLE_FRAME_SIZE
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1053
transcode_init
static int transcode_init(void)
Definition: ffmpeg.c:3664
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1397
get_format
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2798
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:565
sub2video_push_ref
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
time.h
close_all_output_streams
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:681
received_nb_signals
static volatile int received_nb_signals
Definition: ffmpeg.c:335
do_benchmark_all
int do_benchmark_all
Definition: ffmpeg_opt.c:97
AV_DISPOSITION_ORIGINAL
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:817
nb_input_streams
int nb_input_streams
Definition: ffmpeg.c:148
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:655
InputStream::min_pts
int64_t min_pts
Definition: ffmpeg.h:321
HWAccel::name
const char * name
Definition: ffmpeg.h:68
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:292
swresample.h
index
int index
Definition: gxfenc.c:89
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: avcodec.h:4067
AVFormatContext::oformat
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1361
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:568
error
static void error(const char *err)
Definition: target_dec_fuzzer.c:61
input_files
InputFile ** input_files
Definition: ffmpeg.c:149
AVStream::nb_frames
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:921
for
for(j=16;j >0;--j)
Definition: h264pred_template.c:469
InputStream::frames_decoded
uint64_t frames_decoded
Definition: ffmpeg.h:384
OutputFilter::formats
int * formats
Definition: ffmpeg.h:277
InputStream::next_dts
int64_t next_dts
Definition: ffmpeg.h:312
AVFrame::best_effort_timestamp
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:559
FilterGraph
Definition: ffmpeg.h:282
print_stats
int print_stats
Definition: ffmpeg_opt.c:106
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1398
InputFilter::filter
AVFilterContext * filter
Definition: ffmpeg.h:236
AVOutputFormat::flags
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:515
ff_dlog
#define ff_dlog(a,...)
Definition: tableprint_vlc.h:29
VSYNC_VFR
#define VSYNC_VFR
Definition: ffmpeg.h:52
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:476
AVCodecContext::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:1688
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:269
FF_DEBUG_DCT_COEFF
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2662
options
const OptionDef options[]
AV_DISPOSITION_CAPTIONS
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:850
AVCodecContext::stats_out
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2584
FALSE
#define FALSE
Definition: windows2linux.h:37
desc
const char * desc
Definition: nvenc.c:68
AVIOContext
Bytestream IO Context.
Definition: avio.h:161
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:378
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
InputStream::hwaccel_device_type
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
InputStream::decoded_frame
AVFrame * decoded_frame
Definition: ffmpeg.h:306
AVPacket::size
int size
Definition: avcodec.h:1478
InputStream::wrap_correction_done
int wrap_correction_done
Definition: ffmpeg.h:317
InputStream::start
int64_t start
Definition: ffmpeg.h:309
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:806
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
InputStream::filter_frame
AVFrame * filter_frame
Definition: ffmpeg.h:307
threadmessage.h
InputStream::file_index
int file_index
Definition: ffmpeg.h:296
output_files
OutputFile ** output_files
Definition: ffmpeg.c:154
parse_forced_key_frames
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3209
AVFrame::quality
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:418
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
received_sigterm
static volatile int received_sigterm
Definition: ffmpeg.c:334
start_time
static int64_t start_time
Definition: ffplay.c:331
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
FilterGraph::graph
AVFilterGraph * graph
Definition: ffmpeg.h:286
AVFormatContext::url
char * url
input or output URL.
Definition: avformat.h:1438
TRUE
#define TRUE
Definition: windows2linux.h:33
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2233
send_filter_eof
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2555
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3119
InputStream::got_output
int got_output
Definition: ffmpeg.h:341
AVCodec::defaults
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3554
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:90
size
int size
Definition: twinvq_data.h:11134
copy_ts
int copy_ts
Definition: ffmpeg_opt.c:100
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
subtitle_out
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
copy_tb
int copy_tb
Definition: ffmpeg_opt.c:102
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1317
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
InputStream::hwaccel_retrieved_pix_fmt
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
hwaccel_decode_init
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:475
av_stream_get_codec_timebase
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5813
OutputStream::source_index
int source_index
Definition: ffmpeg.h:445
DECODING_FOR_OST
#define DECODING_FOR_OST
Definition: ffmpeg.h:301
FFDIFFSIGN
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
AVFMT_NOFILE
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:463
AV_DISPOSITION_DUB
#define AV_DISPOSITION_DUB
Definition: avformat.h:816
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:368
AV_PICTURE_TYPE_NONE
@ AV_PICTURE_TYPE_NONE
Undefined.
Definition: avutil.h:273
AVStream::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:932
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:3936
avdevice.h
AVFMT_NOSTREAMS
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:472
AV_DISPOSITION_HEARING_IMPAIRED
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:828
OSTFinished
OSTFinished
Definition: ffmpeg.h:437
av_frame_remove_side_data
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
If side data of the supplied type exists in the frame, free it and remove it from the frame.
Definition: frame.c:805
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: avcodec.h:1476
avio_write
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:218
AV_CODEC_FLAG_PASS2
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:875
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:360
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
InputStream::samples_decoded
uint64_t samples_decoded
Definition: ffmpeg.h:385
OutputFile::limit_filesize
uint64_t limit_filesize
Definition: ffmpeg.h:560
dup_warning
static unsigned dup_warning
Definition: ffmpeg.c:136
AVPacketSideData::size
int size
Definition: avcodec.h:1422
av_sdp_create
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:842
AV_FIELD_TT
@ AV_FIELD_TT
Definition: avcodec.h:1546
InputStream::max_pts
int64_t max_pts
Definition: ffmpeg.h:322
AVPacket::flags
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1483
av_packet_make_refcounted
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:663
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
do_benchmark
int do_benchmark
Definition: ffmpeg_opt.c:96
get_buffer
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2884
bitrate
int64_t bitrate
Definition: h264_levels.c:131
av_packet_rescale_ts
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another.
Definition: avpacket.c:704
av_buffersink_get_type
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
ifilter_send_frame
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2130
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:3881
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
av_thread_message_queue_alloc
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:2226
decode
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2234
AVStream::side_data
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:972
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: avcodec.h:3495
guess_input_channel_layout
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2080
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
write_packet
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:690
do_subtitle_out
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:971
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
register_exit
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:131
do_video_stats
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1360
HWACCEL_AUTO
@ HWACCEL_AUTO
Definition: ffmpeg.h:60
AVFrame::interlaced_frame
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:442
AV_CODEC_ID_MJPEG
@ AV_CODEC_ID_MJPEG
Definition: avcodec.h:225
InputStream::guess_layout_max
int guess_layout_max
Definition: ffmpeg.h:335
avcodec_default_get_buffer2
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1702
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
avio_closep
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1210
av_write_trailer
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1254
av_log_set_level
void av_log_set_level(int level)
Set the log level.
Definition: log.c:385
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:361
bprint.h
DECODING_FOR_FILTER
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:302
lrintf
#define lrintf(x)
Definition: libm_mips.h:70
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:259
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1470
InputStream::ret
int ret
Definition: ffmpeg.h:342
AV_FRAME_FLAG_CORRUPT
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:520
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
sub2video_flush
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:304
hw_device_ctx
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:45
avcodec_descriptor_get
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3257
internal.h
AVCodecParameters::height
int height
Definition: avcodec.h:4024
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2155
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVCodecParameters::block_align
int block_align
Audio only.
Definition: avcodec.h:4074
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:158
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
display.h
av_thread_message_queue_set_err_send
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
Definition: threadmessage.c:188
vsnprintf
#define vsnprintf
Definition: snprintf.h:36
exit_on_error
int exit_on_error
Definition: ffmpeg_opt.c:104
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffmpeg.c:341
OutputFile::ost_index
int ost_index
Definition: ffmpeg.h:557
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
delta
float delta
Definition: vorbis_enc_data.h:457
InputStream::hwaccel_uninit
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
AV_DISPOSITION_KARAOKE
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:820
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
av_get_audio_frame_duration
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1775
transcode_subtitles
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2478
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1264
AVCodec::name
const char * name
Name of the codec implementation.
Definition: avcodec.h:3488
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AV_DISPOSITION_DEPENDENT
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:853
AVCodecContext::chroma_sample_location
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2207
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:481
AVCodecContext::height
int height
Definition: avcodec.h:1738
OutputFilter::channel_layouts
uint64_t * channel_layouts
Definition: ffmpeg.h:278
avcodec_send_frame
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:387
AVCodecContext::pix_fmt
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1775
nb_output_files
int nb_output_files
Definition: ffmpeg.c:155
AVCodecParameters::field_order
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4038
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:506
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
AVFMT_TS_NONSTRICT
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:477
AVHWFramesContext
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
av_opt_eval_flags
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:923
mid_pred
#define mid_pred
Definition: mathops.h:97
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
AV_DISPOSITION_VISUAL_IMPAIRED
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:829
decode_error_stat
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
AVStream::id
int id
Format-specific stream ID.
Definition: avformat.h:877
AVFrame::decode_error_flags
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:590
AVFMT_FLAG_BITEXACT
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1490
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:870
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
abort_on_flags
int abort_on_flags
Definition: ffmpeg_opt.c:105
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:2028
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
output_packet
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:847
frame
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame For filters that do not use the this method is called when a frame is wanted on an output For a it should directly call filter_frame on the corresponding output For a if there are queued frames already one of these frames should be pushed If the filter should request a frame on one of its repeatedly until at least one frame has been pushed Return or at least make progress towards producing a frame
Definition: filter_design.txt:264
DWORD
uint32_t DWORD
Definition: basicDataTypeConversions.h:51
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:239
av_strlcat
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes,...
Definition: avstring.c:93
normalize.ifile
ifile
Definition: normalize.py:6
sdp_filename
char * sdp_filename
Definition: ffmpeg_opt.c:85
AVStream::nb_side_data
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:976
AV_CODEC_PROP_TEXT_SUB
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:780
AVCodecContext::opaque
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1607
InputStream::reinit_filters
int reinit_filters
Definition: ffmpeg.h:361
init_output_stream_encode
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3296
hw_device_free_all
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:272
avformat.h
InputFile::eagain
int eagain
Definition: ffmpeg.h:396
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:383
InputFile::ist_index
int ist_index
Definition: ffmpeg.h:397
AV_DISPOSITION_DESCRIPTIONS
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:851
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AV_RL32
uint64_t_TMPL AV_WL64 unsigned int_TMPL AV_RL32
Definition: bytestream.h:88
InputFilter::sample_rate
int sample_rate
Definition: ffmpeg.h:250
avio_printf
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
HWAccel::init
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:69
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5033
ifilter_parameters_from_frame
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1185
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodecContext
main external API structure.
Definition: avcodec.h:1565
AVFrame::height
int height
Definition: frame.h:353
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:871
transcode_init_done
static atomic_int transcode_init_done
Definition: ffmpeg.c:336
BenchmarkTimeStamps
Definition: ffmpeg.c:123
avformat_transfer_internal_stream_timing_info
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5751
BOOL
uint32_t BOOL
Definition: basicDataTypeConversions.h:16
hw_device_setup_for_encode
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:417
channel_layout.h
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
av_bsf_init
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:135
InputFilter::format
int format
Definition: ffmpeg.h:245
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
OutputStream::finished
OSTFinished finished
Definition: ffmpeg.h:512
report_new_stream
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3649
AVRational::den
int den
Denominator.
Definition: rational.h:60
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
config.h
av_stream_get_end_pts
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:141
check_decode_result
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2100
avfilter.h
avformat_free_context
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4414
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
av_bsf_receive_packet
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:212
AVPixFmtDescriptor::comp
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
AVStream::r_frame_rate
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:994
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
InputStream::nb_samples
int64_t nb_samples
Definition: ffmpeg.h:328
InputFilter::height
int height
Definition: ffmpeg.h:247
AV_CODEC_CAP_DELAY
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:1006
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: avcodec.h:1479
av_buffer_ref
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
avcodec_get_hw_config
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1824
InputFile::ts_offset
int64_t ts_offset
Definition: ffmpeg.h:404
InputStream::discard
int discard
Definition: ffmpeg.h:298
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
print_report
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1645
AV_CODEC_CAP_PARAM_CHANGE
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1045
OutputFilter
Definition: ffmpeg.h:259
nb_frames_dup
static int nb_frames_dup
Definition: ffmpeg.c:135
InputStream::sub2video::frame
AVFrame * frame
Definition: ffmpeg.h:350
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:1573
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:299
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:469
audio_volume
int audio_volume
Definition: ffmpeg_opt.c:91
AV_CODEC_FLAG_BITEXACT
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:908
OutputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:555
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:225
AVCodecParameters::video_delay
int video_delay
Video only.
Definition: avcodec.h:4052
av_fifo_size
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
InputStream::sub2video::h
int h
Definition: ffmpeg.h:351
set_encoder_id
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3168
AVCodecParameters::format
int format
Definition: avcodec.h:3981
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
InputFilter::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:254
InputStream::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
avio_flush
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:238
av_fifo_freep
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
FKF_N_FORCED
@ FKF_N_FORCED
Definition: ffmpeg.h:426
AVDictionaryEntry
Definition: dict.h:81
InputStream::sub2video::end_pts
int64_t end_pts
Definition: ffmpeg.h:348
av_add_q
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
stdin_interaction
int stdin_interaction
Definition: ffmpeg_opt.c:108
do_hex_dump
int do_hex_dump
Definition: ffmpeg_opt.c:98
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
AVCodecParameters::codec_id
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3957
InputStream::ts_scale
double ts_scale
Definition: ffmpeg.h:330
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1454
av_thread_message_queue_free
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
av_interleaved_write_frame
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1192
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
init_input_stream
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2894
cmdutils.h
AVPacket::pos
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1497
AV_FIELD_TB
@ AV_FIELD_TB
Definition: avcodec.h:1548
av_copy_packet_side_data
attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
InputFile::input_ts_offset
int64_t input_ts_offset
Definition: ffmpeg.h:402
AVCodecParameters::channel_layout
uint64_t channel_layout
Audio only.
Definition: avcodec.h:4059
InputStream::dts_buffer
int64_t * dts_buffer
Definition: ffmpeg.h:387
AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
@ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3427
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:158
AV_FIELD_BB
@ AV_FIELD_BB
Definition: avcodec.h:1547
AVCodecContext::width
int width
picture width / height.
Definition: avcodec.h:1738
AV_OPT_TYPE_FLAGS
@ AV_OPT_TYPE_FLAGS
Definition: opt.h:222
av_fifo_alloc
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:165
imgutils.h
AV_PKT_DATA_QUALITY_STATS
@ AV_PKT_DATA_QUALITY_STATS
This side data contains quality related information from the encoder.
Definition: avcodec.h:1276
timestamp.h
OutputStream
Definition: muxing.c:53
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
hwcontext.h
av_thread_message_queue_set_err_recv
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
Definition: threadmessage.c:199
av_strlcpy
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1287
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
codec_flags
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:89
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
av_stream_get_parser
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:149
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
AVCodecHWConfig
Definition: avcodec.h:3455
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
av_pkt_dump_log2
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:113
InputStream::nb_dts_buffer
int nb_dts_buffer
Definition: ffmpeg.h:388
InputStream::saw_first_ts
int saw_first_ts
Definition: ffmpeg.h:331
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:909
set_tty_echo
static void set_tty_echo(int on)
Definition: ffmpeg.c:3894
avstring.h
FF_QP2LAMBDA
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
InputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:409
FKF_N
@ FKF_N
Definition: ffmpeg.h:425
AVStream::pts_wrap_bits
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1062
log_callback_null
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4841
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
AV_FIELD_BT
@ AV_FIELD_BT
Definition: avcodec.h:1549
AV_OPT_TYPE_CONST
@ AV_OPT_TYPE_CONST
Definition: opt.h:232
snprintf
#define snprintf
Definition: snprintf.h:34
ABORT_ON_FLAG_EMPTY_OUTPUT
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
read_key
static int read_key(void)
Definition: ffmpeg.c:425
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
reap_filters
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity.
Definition: ffmpeg.c:1424
VSYNC_DROP
#define VSYNC_DROP
Definition: ffmpeg.h:54
buffersrc.h
AVCodecHWConfig::device_type
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3471
InputStream::subtitle
AVSubtitle subtitle
Definition: ffmpeg.h:343
AVCodecContext::sample_aspect_ratio
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:1944
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
init_output_bsfs
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3018
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:3935
filtergraph_is_simple
int filtergraph_is_simple(FilterGraph *fg)
Definition: ffmpeg_filter.c:1217
init_encoder_time_base
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3272
avcodec_parameters_copy
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2080
dec_ctx
static AVCodecContext * dec_ctx
Definition: filtering_audio.c:43
nb_output_streams
int nb_output_streams
Definition: ffmpeg.c:153
AV_CODEC_FLAG_PASS1
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:871
OutputFile
Definition: ffmpeg.h:554
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
AV_DISPOSITION_LYRICS
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:819
InputStream::autorotate
int autorotate
Definition: ffmpeg.h:337
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
FF_API_DEBUG_MV
#define FF_API_DEBUG_MV
Definition: version.h:58