ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_IO_H
36 #include <io.h>
37 #endif
38 #if HAVE_UNISTD_H
39 #include <unistd.h>
40 #endif
41 
42 #include "libavformat/avformat.h"
43 #include "libavdevice/avdevice.h"
45 #include "libavutil/opt.h"
47 #include "libavutil/parseutils.h"
48 #include "libavutil/samplefmt.h"
49 #include "libavutil/fifo.h"
50 #include "libavutil/internal.h"
51 #include "libavutil/intreadwrite.h"
52 #include "libavutil/dict.h"
53 #include "libavutil/mathematics.h"
54 #include "libavutil/pixdesc.h"
55 #include "libavutil/avstring.h"
56 #include "libavutil/libm.h"
57 #include "libavutil/imgutils.h"
58 #include "libavutil/timestamp.h"
59 #include "libavutil/bprint.h"
60 #include "libavutil/time.h"
62 #include "libavcodec/mathops.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avfilter.h"
66 # include "libavfilter/buffersrc.h"
67 # include "libavfilter/buffersink.h"
68 
69 #if HAVE_SYS_RESOURCE_H
70 #include <sys/time.h>
71 #include <sys/types.h>
72 #include <sys/resource.h>
73 #elif HAVE_GETPROCESSTIMES
74 #include <windows.h>
75 #endif
76 #if HAVE_GETPROCESSMEMORYINFO
77 #include <windows.h>
78 #include <psapi.h>
79 #endif
80 #if HAVE_SETCONSOLECTRLHANDLER
81 #include <windows.h>
82 #endif
83 
84 
85 #if HAVE_SYS_SELECT_H
86 #include <sys/select.h>
87 #endif
88 
89 #if HAVE_TERMIOS_H
90 #include <fcntl.h>
91 #include <sys/ioctl.h>
92 #include <sys/time.h>
93 #include <termios.h>
94 #elif HAVE_KBHIT
95 #include <conio.h>
96 #endif
97 
98 #if HAVE_PTHREADS
99 #include <pthread.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 static void do_video_stats(OutputStream *ost, int frame_size);
124 static int64_t getutime(void);
125 static int64_t getmaxrss(void);
126 
127 static int run_as_daemon = 0;
128 static int nb_frames_dup = 0;
129 static int nb_frames_drop = 0;
130 static int64_t decode_error_stat[2];
131 
132 static int want_sdp = 1;
133 
134 static int current_time;
135 AVIOContext *progress_avio = NULL;
136 
137 static uint8_t *subtitle_out;
138 
139 InputStream **input_streams = NULL;
140 int nb_input_streams = 0;
141 InputFile **input_files = NULL;
142 int nb_input_files = 0;
143 
144 OutputStream **output_streams = NULL;
145 int nb_output_streams = 0;
146 OutputFile **output_files = NULL;
147 int nb_output_files = 0;
148 
149 FilterGraph **filtergraphs;
150 int nb_filtergraphs;
151 
152 #if HAVE_TERMIOS_H
153 
154 /* init terminal so that we can grab keys */
155 static struct termios oldtty;
156 static int restore_tty;
157 #endif
158 
159 #if HAVE_PTHREADS
160 static void free_input_threads(void);
161 #endif
162 
163 /* sub2video hack:
164  Convert subtitles to video with alpha to insert them in filter graphs.
165  This is a temporary solution until libavfilter gets real subtitle support.
166  */
167 
168 static int sub2video_get_blank_frame(InputStream *ist)
169 {
170  int ret;
171  AVFrame *frame = ist->sub2video.frame;
172 
173  av_frame_unref(frame);
174  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
175  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
176  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
177  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
178  return ret;
179  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
180  return 0;
181 }
182 
183 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
184  AVSubtitleRect *r)
185 {
186  uint32_t *pal, *dst2;
187  uint8_t *src, *src2;
188  int x, y;
189 
190  if (r->type != SUBTITLE_BITMAP) {
191  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
192  return;
193  }
194  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
195  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
196  r->x, r->y, r->w, r->h, w, h
197  );
198  return;
199  }
200 
201  dst += r->y * dst_linesize + r->x * 4;
202  src = r->data[0];
203  pal = (uint32_t *)r->data[1];
204  for (y = 0; y < r->h; y++) {
205  dst2 = (uint32_t *)dst;
206  src2 = src;
207  for (x = 0; x < r->w; x++)
208  *(dst2++) = pal[*(src2++)];
209  dst += dst_linesize;
210  src += r->linesize[0];
211  }
212 }
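/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * sub2video_copy_rect() above expands a paletted (PAL8) subtitle rectangle
 * onto the 32-bit canvas: each source byte indexes a 256-entry 32-bit palette
 * that already carries the alpha channel.  A self-contained 2x2 illustration
 * with a hypothetical two-colour palette (all values are made up):
 */
static void sub2video_palette_example(void)
{
    const uint8_t  idx[2][2] = { { 0, 1 }, { 1, 0 } };        /* PAL8 pixel indices */
    const uint32_t pal[2]    = { 0x00000000u, 0xFF00FF00u };  /* transparent, opaque green (ARGB) */
    uint32_t       out[2][2];
    int x, y;

    for (y = 0; y < 2; y++)
        for (x = 0; x < 2; x++)
            out[y][x] = pal[idx[y][x]];   /* same lookup as the inner loop above */
    (void)out;                            /* out[0][1] is opaque green, out[0][0] fully transparent */
}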
213 
214 static void sub2video_push_ref(InputStream *ist, int64_t pts)
215 {
216  AVFrame *frame = ist->sub2video.frame;
217  int i;
218 
219  av_assert1(frame->data[0]);
220  ist->sub2video.last_pts = frame->pts = pts;
221  for (i = 0; i < ist->nb_filters; i++)
222  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
223  AV_BUFFERSRC_FLAG_KEEP_REF |
224  AV_BUFFERSRC_FLAG_PUSH);
225 }
226 
227 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
228 {
229  AVFrame *frame = ist->sub2video.frame;
230  int8_t *dst;
231  int dst_linesize;
232  int num_rects, i;
233  int64_t pts, end_pts;
234 
235  if (!frame)
236  return;
237  if (sub) {
238  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
239  AV_TIME_BASE_Q, ist->st->time_base);
240  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
241  AV_TIME_BASE_Q, ist->st->time_base);
242  num_rects = sub->num_rects;
243  } else {
244  pts = ist->sub2video.end_pts;
245  end_pts = INT64_MAX;
246  num_rects = 0;
247  }
248  if (sub2video_get_blank_frame(ist) < 0) {
250  "Impossible to get a blank canvas.\n");
251  return;
252  }
253  dst = frame->data [0];
254  dst_linesize = frame->linesize[0];
255  for (i = 0; i < num_rects; i++)
256  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
257  sub2video_push_ref(ist, pts);
258  ist->sub2video.end_pts = end_pts;
259 }
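/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * sub2video_update() above maps sub->pts, which is in AV_TIME_BASE_Q
 * (microseconds), plus start_display_time (milliseconds) into the input
 * stream's time base with av_rescale_q().  A worked example with hypothetical
 * values: a subtitle at 5 s with a 200 ms display offset lands on tick 468000
 * of a 90 kHz stream.
 */
static int64_t sub2video_pts_example(void)
{
    int64_t    sub_pts            = 5000000;      /* 5 s in AV_TIME_BASE units (us) */
    uint32_t   start_display_time = 200;          /* 200 ms into the subtitle       */
    AVRational stream_tb          = { 1, 90000 }; /* typical MPEG-TS time base      */

    /* (5000000 + 200*1000) us * 90000 / 1000000 = 468000 ticks */
    return av_rescale_q(sub_pts + start_display_time * 1000LL, AV_TIME_BASE_Q, stream_tb);
}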
260 
261 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
262 {
263  InputFile *infile = input_files[ist->file_index];
264  int i, j, nb_reqs;
265  int64_t pts2;
266 
267  /* When a frame is read from a file, examine all sub2video streams in
268  the same file and send the sub2video frame again. Otherwise, decoded
269  video frames could be accumulating in the filter graph while a filter
270  (possibly overlay) is desperately waiting for a subtitle frame. */
271  for (i = 0; i < infile->nb_streams; i++) {
272  InputStream *ist2 = input_streams[infile->ist_index + i];
273  if (!ist2->sub2video.frame)
274  continue;
275  /* subtitles seem to be usually muxed ahead of other streams;
276  if not, subtracting a larger time here is necessary */
277  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
278  /* do not send the heartbeat frame if the subtitle is already ahead */
279  if (pts2 <= ist2->sub2video.last_pts)
280  continue;
281  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
282  sub2video_update(ist2, NULL);
283  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
284  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
285  if (nb_reqs)
286  sub2video_push_ref(ist2, pts2);
287  }
288 }
289 
290 static void sub2video_flush(InputStream *ist)
291 {
292  int i;
293 
294  if (ist->sub2video.end_pts < INT64_MAX)
295  sub2video_update(ist, NULL);
296  for (i = 0; i < ist->nb_filters; i++)
297  av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
298 }
299 
300 /* end of sub2video hack */
301 
302 static void term_exit_sigsafe(void)
303 {
304 #if HAVE_TERMIOS_H
305  if(restore_tty)
306  tcsetattr (0, TCSANOW, &oldtty);
307 #endif
308 }
309 
310 void term_exit(void)
311 {
312  av_log(NULL, AV_LOG_QUIET, "%s", "");
313  term_exit_sigsafe();
314 }
315 
316 static volatile int received_sigterm = 0;
317 static volatile int received_nb_signals = 0;
318 static volatile int transcode_init_done = 0;
319 static volatile int ffmpeg_exited = 0;
320 static int main_return_code = 0;
321 
322 static void
323 sigterm_handler(int sig)
324 {
325  received_sigterm = sig;
326  received_nb_signals++;
327  term_exit_sigsafe();
328  if(received_nb_signals > 3) {
329  write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
330  strlen("Received > 3 system signals, hard exiting\n"));
331 
332  exit(123);
333  }
334 }
335 
336 #if HAVE_SETCONSOLECTRLHANDLER
337 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
338 {
339  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
340 
341  switch (fdwCtrlType)
342  {
343  case CTRL_C_EVENT:
344  case CTRL_BREAK_EVENT:
345  sigterm_handler(SIGINT);
346  return TRUE;
347 
348  case CTRL_CLOSE_EVENT:
349  case CTRL_LOGOFF_EVENT:
350  case CTRL_SHUTDOWN_EVENT:
351  sigterm_handler(SIGTERM);
352  /* Basically, with these 3 events, when we return from this method the
353  process is hard terminated, so stall for as long as we need to in order
354  to let the main thread(s) clean up and terminate gracefully
355  (we have at most 5 seconds, but should be done far before that). */
356  while (!ffmpeg_exited) {
357  Sleep(0);
358  }
359  return TRUE;
360 
361  default:
362  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
363  return FALSE;
364  }
365 }
366 #endif
367 
368 void term_init(void)
369 {
370 #if HAVE_TERMIOS_H
371  if (!run_as_daemon && stdin_interaction) {
372  struct termios tty;
373  if (tcgetattr (0, &tty) == 0) {
374  oldtty = tty;
375  restore_tty = 1;
376 
377  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
378  |INLCR|IGNCR|ICRNL|IXON);
379  tty.c_oflag |= OPOST;
380  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
381  tty.c_cflag &= ~(CSIZE|PARENB);
382  tty.c_cflag |= CS8;
383  tty.c_cc[VMIN] = 1;
384  tty.c_cc[VTIME] = 0;
385 
386  tcsetattr (0, TCSANOW, &tty);
387  }
388  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
389  }
390 #endif
391 
392  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
393  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
394 #ifdef SIGXCPU
395  signal(SIGXCPU, sigterm_handler);
396 #endif
397 #if HAVE_SETCONSOLECTRLHANDLER
398  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
399 #endif
400 }
401 
402 /* read a key without blocking */
403 static int read_key(void)
404 {
405  unsigned char ch;
406 #if HAVE_TERMIOS_H
407  int n = 1;
408  struct timeval tv;
409  fd_set rfds;
410 
411  FD_ZERO(&rfds);
412  FD_SET(0, &rfds);
413  tv.tv_sec = 0;
414  tv.tv_usec = 0;
415  n = select(1, &rfds, NULL, NULL, &tv);
416  if (n > 0) {
417  n = read(0, &ch, 1);
418  if (n == 1)
419  return ch;
420 
421  return n;
422  }
423 #elif HAVE_KBHIT
424 # if HAVE_PEEKNAMEDPIPE
425  static int is_pipe;
426  static HANDLE input_handle;
427  DWORD dw, nchars;
428  if(!input_handle){
429  input_handle = GetStdHandle(STD_INPUT_HANDLE);
430  is_pipe = !GetConsoleMode(input_handle, &dw);
431  }
432 
433  if (is_pipe) {
434  /* When running under a GUI, you will end up here. */
435  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
436  // input pipe may have been closed by the program that ran ffmpeg
437  return -1;
438  }
439  //Read it
440  if(nchars != 0) {
441  read(0, &ch, 1);
442  return ch;
443  }else{
444  return -1;
445  }
446  }
447 # endif
448  if(kbhit())
449  return(getch());
450 #endif
451  return -1;
452 }
453 
454 static int decode_interrupt_cb(void *ctx)
455 {
456  return received_nb_signals > transcode_init_done;
457 }
458 
459 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
460 
461 static void ffmpeg_cleanup(int ret)
462 {
463  int i, j;
464 
465  if (do_benchmark) {
466  int maxrss = getmaxrss() / 1024;
467  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
468  }
469 
470  for (i = 0; i < nb_filtergraphs; i++) {
471  FilterGraph *fg = filtergraphs[i];
473  for (j = 0; j < fg->nb_inputs; j++) {
474  av_freep(&fg->inputs[j]->name);
475  av_freep(&fg->inputs[j]);
476  }
477  av_freep(&fg->inputs);
478  for (j = 0; j < fg->nb_outputs; j++) {
479  av_freep(&fg->outputs[j]->name);
480  av_freep(&fg->outputs[j]);
481  }
482  av_freep(&fg->outputs);
483  av_freep(&fg->graph_desc);
484 
485  av_freep(&filtergraphs[i]);
486  }
487  av_freep(&filtergraphs);
488 
490 
491  /* close files */
492  for (i = 0; i < nb_output_files; i++) {
493  OutputFile *of = output_files[i];
494  AVFormatContext *s;
495  if (!of)
496  continue;
497  s = of->ctx;
498  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
499  avio_closep(&s->pb);
501  av_dict_free(&of->opts);
502 
503  av_freep(&output_files[i]);
504  }
505  for (i = 0; i < nb_output_streams; i++) {
506  OutputStream *ost = output_streams[i];
507 
508  if (!ost)
509  continue;
510 
511  for (j = 0; j < ost->nb_bitstream_filters; j++)
512  av_bsf_free(&ost->bsf_ctx[j]);
513  av_freep(&ost->bsf_ctx);
515 
517  av_frame_free(&ost->last_frame);
518  av_dict_free(&ost->encoder_opts);
519 
520  av_parser_close(ost->parser);
522 
523  av_freep(&ost->forced_keyframes);
525  av_freep(&ost->avfilter);
526  av_freep(&ost->logfile_prefix);
527 
529  ost->audio_channels_mapped = 0;
530 
531  av_dict_free(&ost->sws_dict);
532 
535 
536  while (ost->muxing_queue && av_fifo_size(ost->muxing_queue)) {
537  AVPacket pkt;
538  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
539  av_packet_unref(&pkt);
540  }
542 
543  av_freep(&output_streams[i]);
544  }
545 #if HAVE_PTHREADS
546  free_input_threads();
547 #endif
548  for (i = 0; i < nb_input_files; i++) {
549  avformat_close_input(&input_files[i]->ctx);
550  av_freep(&input_files[i]);
551  }
552  for (i = 0; i < nb_input_streams; i++) {
553  InputStream *ist = input_streams[i];
554 
557  av_dict_free(&ist->decoder_opts);
560  av_freep(&ist->filters);
561  av_freep(&ist->hwaccel_device);
562  av_freep(&ist->dts_buffer);
563 
565 
566  av_freep(&input_streams[i]);
567  }
568 
569  if (vstats_file) {
570  if (fclose(vstats_file))
572  "Error closing vstats file, loss of information possible: %s\n",
573  av_err2str(AVERROR(errno)));
574  }
576 
577  av_freep(&input_streams);
578  av_freep(&input_files);
579  av_freep(&output_streams);
580  av_freep(&output_files);
581 
582  uninit_opts();
583 
585 
586  if (received_sigterm) {
587  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
588  (int) received_sigterm);
589  } else if (ret && transcode_init_done) {
590  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
591  }
592  term_exit();
593  ffmpeg_exited = 1;
594 }
595 
597 {
598  AVDictionaryEntry *t = NULL;
599 
600  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
601  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
602  }
603 }
604 
605 void assert_avoptions(AVDictionary *m)
606 {
607  AVDictionaryEntry *t;
608  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
609  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
610  exit_program(1);
611  }
612 }
613 
614 static void abort_codec_experimental(AVCodec *c, int encoder)
615 {
616  exit_program(1);
617 }
618 
619 static void update_benchmark(const char *fmt, ...)
620 {
621  if (do_benchmark_all) {
622  int64_t t = getutime();
623  va_list va;
624  char buf[1024];
625 
626  if (fmt) {
627  va_start(va, fmt);
628  vsnprintf(buf, sizeof(buf), fmt, va);
629  va_end(va);
630  av_log(NULL, AV_LOG_INFO, "bench: %8"PRIu64" %s \n", t - current_time, buf);
631  }
632  current_time = t;
633  }
634 }
635 
636 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
637 {
638  int i;
639  for (i = 0; i < nb_output_streams; i++) {
640  OutputStream *ost2 = output_streams[i];
641  ost2->finished |= ost == ost2 ? this_stream : others;
642  }
643 }
644 
645 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
646 {
647  AVFormatContext *s = of->ctx;
648  AVStream *st = ost->st;
649  int ret;
650 
651  if (!of->header_written) {
652  AVPacket tmp_pkt;
653  /* the muxer is not initialized yet, buffer the packet */
654  if (!av_fifo_space(ost->muxing_queue)) {
655  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
656  ost->max_muxing_queue_size);
657  if (new_size <= av_fifo_size(ost->muxing_queue)) {
659  "Too many packets buffered for output stream %d:%d.\n",
660  ost->file_index, ost->st->index);
661  exit_program(1);
662  }
663  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
664  if (ret < 0)
665  exit_program(1);
666  }
667  av_packet_move_ref(&tmp_pkt, pkt);
668  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
669  return;
670  }
671 
674  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
675 
676  /*
677  * Audio encoders may split the packets -- #frames in != #packets out.
678  * But there is no reordering, so we can limit the number of output packets
679  * by simply dropping them here.
680  * Counting encoded video frames needs to be done separately because of
681  * reordering, see do_video_out()
682  */
683  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
684  if (ost->frame_number >= ost->max_frames) {
685  av_packet_unref(pkt);
686  return;
687  }
688  ost->frame_number++;
689  }
690  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
691  int i;
692  uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
693  NULL);
694  ost->quality = sd ? AV_RL32(sd) : -1;
695  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
696 
697  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
698  if (sd && i < sd[5])
699  ost->error[i] = AV_RL64(sd + 8 + 8*i);
700  else
701  ost->error[i] = -1;
702  }
703 
704  if (ost->frame_rate.num && ost->is_cfr) {
705  if (pkt->duration > 0)
706  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
707  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
708  ost->st->time_base);
709  }
710  }
711 
712  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
713  if (pkt->dts != AV_NOPTS_VALUE &&
714  pkt->pts != AV_NOPTS_VALUE &&
715  pkt->dts > pkt->pts) {
716  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
717  pkt->dts, pkt->pts,
718  ost->file_index, ost->st->index);
719  pkt->pts =
720  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
721  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
722  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
723  }
725  pkt->dts != AV_NOPTS_VALUE &&
726  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
727  ost->last_mux_dts != AV_NOPTS_VALUE) {
728  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
729  if (pkt->dts < max) {
730  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
731  av_log(s, loglevel, "Non-monotonous DTS in output stream "
732  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
733  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
734  if (exit_on_error) {
735  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
736  exit_program(1);
737  }
738  av_log(s, loglevel, "changing to %"PRId64". This may result "
739  "in incorrect timestamps in the output file.\n",
740  max);
741  if (pkt->pts >= pkt->dts)
742  pkt->pts = FFMAX(pkt->pts, max);
743  pkt->dts = max;
744  }
745  }
746  }
747  ost->last_mux_dts = pkt->dts;
748 
749  ost->data_size += pkt->size;
750  ost->packets_written++;
751 
752  pkt->stream_index = ost->index;
753 
754  if (debug_ts) {
755  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
756  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
758  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
759  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
760  pkt->size
761  );
762  }
763 
764  ret = av_interleaved_write_frame(s, pkt);
765  if (ret < 0) {
766  print_error("av_interleaved_write_frame()", ret);
767  main_return_code = 1;
769  }
770  av_packet_unref(pkt);
771 }
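/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * The "replacing by guess" branch of write_packet() above sets both timestamps
 * to  a + b + c - FFMIN3(a,b,c) - FFMAX3(a,b,c)  with a = pkt->pts, b = pkt->dts
 * and c = ost->last_mux_dts + 1.  Subtracting the minimum and the maximum from
 * the sum of three values leaves their median, i.e. the middle of the three
 * candidate timestamps.
 */
static int64_t median3(int64_t a, int64_t b, int64_t c)
{
    return a + b + c - FFMIN3(a, b, c) - FFMAX3(a, b, c);
    /* e.g. median3(90, 40, 61) == 191 - 40 - 90 == 61 */
}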
772 
773 static void close_output_stream(OutputStream *ost)
774 {
775  OutputFile *of = output_files[ost->file_index];
776 
777  ost->finished |= ENCODER_FINISHED;
778  if (of->shortest) {
779  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
780  of->recording_time = FFMIN(of->recording_time, end);
781  }
782 }
783 
784 static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
785 {
786  int ret = 0;
787 
788  /* apply the output bitstream filters, if any */
789  if (ost->nb_bitstream_filters) {
790  int idx;
791 
792  ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
793  if (ret < 0)
794  goto finish;
795 
796  idx = 1;
797  while (idx) {
798  /* get a packet from the previous filter up the chain */
799  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
800  /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
801  * the API states this shouldn't happen after init(). Propagate it here to the
802  * muxer and to the next filters in the chain to work around this.
803  * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
804  * par_out->extradata and adapt muxers accordingly to get rid of this. */
805  if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
806  ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
807  if (ret < 0)
808  goto finish;
809  ost->bsf_extradata_updated[idx - 1] |= 1;
810  }
811  if (ret == AVERROR(EAGAIN)) {
812  ret = 0;
813  idx--;
814  continue;
815  } else if (ret < 0)
816  goto finish;
817 
818  /* send it to the next filter down the chain or to the muxer */
819  if (idx < ost->nb_bitstream_filters) {
820  /* HACK/FIXME! - See above */
821  if (!(ost->bsf_extradata_updated[idx] & 2)) {
822  ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
823  if (ret < 0)
824  goto finish;
825  ost->bsf_extradata_updated[idx] |= 2;
826  }
827  ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
828  if (ret < 0)
829  goto finish;
830  idx++;
831  } else
832  write_packet(of, pkt, ost);
833  }
834  } else
835  write_packet(of, pkt, ost);
836 
837 finish:
838  if (ret < 0 && ret != AVERROR_EOF) {
839  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
840  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
841  if(exit_on_error)
842  exit_program(1);
843  }
844 }
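/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * The idx-based loop in output_packet() above drains a chain of bitstream
 * filters: a packet is pushed into the first filter, then repeatedly pulled
 * from filter idx-1; EAGAIN means "feed me more input", so idx drops back
 * towards the start of the chain, while a successful pull is either forwarded
 * to the next filter or handed to the muxer.  Minimal restatement using
 * libavcodec's av_bsf API, without the extradata-propagation hack; write_cb
 * and opaque are hypothetical callbacks standing in for write_packet().
 */
static int drain_bsf_chain(AVBSFContext **bsf, int nb_bsf, AVPacket *pkt,
                           int (*write_cb)(AVPacket *pkt, void *opaque), void *opaque)
{
    int idx, ret;

    ret = av_bsf_send_packet(bsf[0], pkt);
    if (ret < 0)
        return ret;

    idx = 1;
    while (idx) {
        ret = av_bsf_receive_packet(bsf[idx - 1], pkt);
        if (ret == AVERROR(EAGAIN)) {        /* this filter needs more input */
            idx--;                           /* go back up the chain         */
            continue;
        } else if (ret < 0) {
            return ret == AVERROR_EOF ? 0 : ret;
        }

        if (idx < nb_bsf) {                  /* pass the packet further down */
            ret = av_bsf_send_packet(bsf[idx], pkt);
            if (ret < 0)
                return ret;
            idx++;
        } else {                             /* last filter: hand it to the muxer */
            ret = write_cb(pkt, opaque);
            if (ret < 0)
                return ret;
        }
    }
    return 0;
}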
845 
846 static int check_recording_time(OutputStream *ost)
847 {
848  OutputFile *of = output_files[ost->file_index];
849 
850  if (of->recording_time != INT64_MAX &&
852  AV_TIME_BASE_Q) >= 0) {
853  close_output_stream(ost);
854  return 0;
855  }
856  return 1;
857 }
858 
859 static void do_audio_out(OutputFile *of, OutputStream *ost,
860  AVFrame *frame)
861 {
862  AVCodecContext *enc = ost->enc_ctx;
863  AVPacket pkt;
864  int ret;
865 
866  av_init_packet(&pkt);
867  pkt.data = NULL;
868  pkt.size = 0;
869 
870  if (!check_recording_time(ost))
871  return;
872 
873  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
874  frame->pts = ost->sync_opts;
875  ost->sync_opts = frame->pts + frame->nb_samples;
876  ost->samples_encoded += frame->nb_samples;
877  ost->frames_encoded++;
878 
879  av_assert0(pkt.size || !pkt.data);
881  if (debug_ts) {
882  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
883  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
884  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
885  enc->time_base.num, enc->time_base.den);
886  }
887 
888  ret = avcodec_send_frame(enc, frame);
889  if (ret < 0)
890  goto error;
891 
892  while (1) {
893  ret = avcodec_receive_packet(enc, &pkt);
894  if (ret == AVERROR(EAGAIN))
895  break;
896  if (ret < 0)
897  goto error;
898 
899  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
900 
901  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
902 
903  if (debug_ts) {
904  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
905  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
906  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
907  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
908  }
909 
910  output_packet(of, &pkt, ost);
911  }
912 
913  return;
914 error:
915  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
916  exit_program(1);
917 }
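/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * do_audio_out() above (and do_video_out() below) follow the same
 * send-frame / receive-packet pattern of the libavcodec encode API: submit one
 * frame, then pull packets until the encoder reports EAGAIN.  Passing a NULL
 * frame instead flushes the encoder, which is what flush_encoders() does at
 * the end of the transcode; after a flush, receive eventually returns
 * AVERROR_EOF.  The sink callback is a hypothetical stand-in for
 * output_packet().
 */
static int encode_frame_sketch(AVCodecContext *enc, const AVFrame *frame,
                               int (*sink)(AVPacket *pkt, void *opaque), void *opaque)
{
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    ret = avcodec_send_frame(enc, frame);     /* frame == NULL starts a flush */
    if (ret < 0)
        return ret;

    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                         /* needs more input / fully flushed */
        if (ret < 0)
            return ret;                       /* real encoding error */
        ret = sink(&pkt, opaque);
        av_packet_unref(&pkt);
        if (ret < 0)
            return ret;
    }
}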
918 
919 static void do_subtitle_out(OutputFile *of,
920  OutputStream *ost,
921  AVSubtitle *sub)
922 {
923  int subtitle_out_max_size = 1024 * 1024;
924  int subtitle_out_size, nb, i;
925  AVCodecContext *enc;
926  AVPacket pkt;
927  int64_t pts;
928 
929  if (sub->pts == AV_NOPTS_VALUE) {
930  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
931  if (exit_on_error)
932  exit_program(1);
933  return;
934  }
935 
936  enc = ost->enc_ctx;
937 
938  if (!subtitle_out) {
939  subtitle_out = av_malloc(subtitle_out_max_size);
940  if (!subtitle_out) {
941  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
942  exit_program(1);
943  }
944  }
945 
946  /* Note: DVB subtitles need one packet to draw them and another
947  packet to clear them */
948  /* XXX: signal it in the codec context ? */
950  nb = 2;
951  else
952  nb = 1;
953 
954  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
955  pts = sub->pts;
956  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
957  pts -= output_files[ost->file_index]->start_time;
958  for (i = 0; i < nb; i++) {
959  unsigned save_num_rects = sub->num_rects;
960 
961  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
962  if (!check_recording_time(ost))
963  return;
964 
965  sub->pts = pts;
966  // start_display_time is required to be 0
967  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
969  sub->start_display_time = 0;
970  if (i == 1)
971  sub->num_rects = 0;
972 
973  ost->frames_encoded++;
974 
975  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
976  subtitle_out_max_size, sub);
977  if (i == 1)
978  sub->num_rects = save_num_rects;
979  if (subtitle_out_size < 0) {
980  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
981  exit_program(1);
982  }
983 
984  av_init_packet(&pkt);
985  pkt.data = subtitle_out;
986  pkt.size = subtitle_out_size;
987  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
988  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
989  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
990  /* XXX: the pts correction is handled here. Maybe handling
991  it in the codec would be better */
992  if (i == 0)
993  pkt.pts += 90 * sub->start_display_time;
994  else
995  pkt.pts += 90 * sub->end_display_time;
996  }
997  pkt.dts = pkt.pts;
998  output_packet(of, &pkt, ost);
999  }
1000 }
1001 
1002 static void do_video_out(OutputFile *of,
1003  OutputStream *ost,
1004  AVFrame *next_picture,
1005  double sync_ipts)
1006 {
1007  int ret, format_video_sync;
1008  AVPacket pkt;
1009  AVCodecContext *enc = ost->enc_ctx;
1010  AVCodecParameters *mux_par = ost->st->codecpar;
1011  int nb_frames, nb0_frames, i;
1012  double delta, delta0;
1013  double duration = 0;
1014  int frame_size = 0;
1015  InputStream *ist = NULL;
1017 
1018  if (ost->source_index >= 0)
1019  ist = input_streams[ost->source_index];
1020 
1021  if (filter->inputs[0]->frame_rate.num > 0 &&
1022  filter->inputs[0]->frame_rate.den > 0)
1023  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
1024 
1025  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1026  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1027 
1028  if (!ost->filters_script &&
1029  !ost->filters &&
1030  next_picture &&
1031  ist &&
1032  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1033  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1034  }
1035 
1036  if (!next_picture) {
1037  //end, flushing
1038  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1039  ost->last_nb0_frames[1],
1040  ost->last_nb0_frames[2]);
1041  } else {
1042  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1043  delta = delta0 + duration;
1044 
1045  /* by default, we output a single frame */
1046  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1047  nb_frames = 1;
1048 
1049  format_video_sync = video_sync_method;
1050  if (format_video_sync == VSYNC_AUTO) {
1051  if(!strcmp(of->ctx->oformat->name, "avi")) {
1052  format_video_sync = VSYNC_VFR;
1053  } else
1054  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1055  if ( ist
1056  && format_video_sync == VSYNC_CFR
1057  && input_files[ist->file_index]->ctx->nb_streams == 1
1058  && input_files[ist->file_index]->input_ts_offset == 0) {
1059  format_video_sync = VSYNC_VSCFR;
1060  }
1061  if (format_video_sync == VSYNC_CFR && copy_ts) {
1062  format_video_sync = VSYNC_VSCFR;
1063  }
1064  }
1065  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1066 
1067  if (delta0 < 0 &&
1068  delta > 0 &&
1069  format_video_sync != VSYNC_PASSTHROUGH &&
1070  format_video_sync != VSYNC_DROP) {
1071  if (delta0 < -0.6) {
1072  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
1073  } else
1074  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1075  sync_ipts = ost->sync_opts;
1076  duration += delta0;
1077  delta0 = 0;
1078  }
1079 
1080  switch (format_video_sync) {
1081  case VSYNC_VSCFR:
1082  if (ost->frame_number == 0 && delta0 >= 0.5) {
1083  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1084  delta = duration;
1085  delta0 = 0;
1086  ost->sync_opts = lrint(sync_ipts);
1087  }
1088  case VSYNC_CFR:
1089  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1090  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1091  nb_frames = 0;
1092  } else if (delta < -1.1)
1093  nb_frames = 0;
1094  else if (delta > 1.1) {
1095  nb_frames = lrintf(delta);
1096  if (delta0 > 1.1)
1097  nb0_frames = lrintf(delta0 - 0.6);
1098  }
1099  break;
1100  case VSYNC_VFR:
1101  if (delta <= -0.6)
1102  nb_frames = 0;
1103  else if (delta > 0.6)
1104  ost->sync_opts = lrint(sync_ipts);
1105  break;
1106  case VSYNC_DROP:
1107  case VSYNC_PASSTHROUGH:
1108  ost->sync_opts = lrint(sync_ipts);
1109  break;
1110  default:
1111  av_assert0(0);
1112  }
1113  }
1114 
1115  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1116  nb0_frames = FFMIN(nb0_frames, nb_frames);
1117 
1118  memmove(ost->last_nb0_frames + 1,
1119  ost->last_nb0_frames,
1120  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1121  ost->last_nb0_frames[0] = nb0_frames;
1122 
1123  if (nb0_frames == 0 && ost->last_dropped) {
1124  nb_frames_drop++;
1126  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1127  ost->frame_number, ost->st->index, ost->last_frame->pts);
1128  }
1129  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1130  if (nb_frames > dts_error_threshold * 30) {
1131  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1132  nb_frames_drop++;
1133  return;
1134  }
1135  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1136  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1137  }
1138  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1139 
1140  /* duplicates frame if needed */
1141  for (i = 0; i < nb_frames; i++) {
1142  AVFrame *in_picture;
1143  av_init_packet(&pkt);
1144  pkt.data = NULL;
1145  pkt.size = 0;
1146 
1147  if (i < nb0_frames && ost->last_frame) {
1148  in_picture = ost->last_frame;
1149  } else
1150  in_picture = next_picture;
1151 
1152  if (!in_picture)
1153  return;
1154 
1155  in_picture->pts = ost->sync_opts;
1156 
1157 #if 1
1158  if (!check_recording_time(ost))
1159 #else
1160  if (ost->frame_number >= ost->max_frames)
1161 #endif
1162  return;
1163 
1164 #if FF_API_LAVF_FMT_RAWPICTURE
1165  if (of->ctx->oformat->flags & AVFMT_RAWPICTURE &&
1166  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1167  /* raw pictures are written as an AVPicture structure to
1168  avoid any copies. We temporarily support this older
1169  method. */
1170  if (in_picture->interlaced_frame)
1171  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1172  else
1173  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1174  pkt.data = (uint8_t *)in_picture;
1175  pkt.size = sizeof(AVPicture);
1176  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1177  pkt.flags |= AV_PKT_FLAG_KEY;
1178 
1179  output_packet(of, &pkt, ost);
1180  } else
1181 #endif
1182  {
1183  int forced_keyframe = 0;
1184  double pts_time;
1185 
1187  ost->top_field_first >= 0)
1188  in_picture->top_field_first = !!ost->top_field_first;
1189 
1190  if (in_picture->interlaced_frame) {
1191  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1192  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1193  else
1194  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1195  } else
1196  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1197 
1198  in_picture->quality = enc->global_quality;
1199  in_picture->pict_type = 0;
1200 
1201  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1202  in_picture->pts * av_q2d(enc->time_base) : NAN;
1203  if (ost->forced_kf_index < ost->forced_kf_count &&
1204  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1205  ost->forced_kf_index++;
1206  forced_keyframe = 1;
1207  } else if (ost->forced_keyframes_pexpr) {
1208  double res;
1209  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1212  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1218  res);
1219  if (res) {
1220  forced_keyframe = 1;
1226  }
1227 
1229  } else if ( ost->forced_keyframes
1230  && !strncmp(ost->forced_keyframes, "source", 6)
1231  && in_picture->key_frame==1) {
1232  forced_keyframe = 1;
1233  }
1234 
1235  if (forced_keyframe) {
1236  in_picture->pict_type = AV_PICTURE_TYPE_I;
1237  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1238  }
1239 
1241  if (debug_ts) {
1242  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1243  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1244  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1245  enc->time_base.num, enc->time_base.den);
1246  }
1247 
1248  ost->frames_encoded++;
1249 
1250  ret = avcodec_send_frame(enc, in_picture);
1251  if (ret < 0)
1252  goto error;
1253 
1254  while (1) {
1255  ret = avcodec_receive_packet(enc, &pkt);
1256  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1257  if (ret == AVERROR(EAGAIN))
1258  break;
1259  if (ret < 0)
1260  goto error;
1261 
1262  if (debug_ts) {
1263  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1264  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1265  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1266  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1267  }
1268 
1269  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1270  pkt.pts = ost->sync_opts;
1271 
1272  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1273 
1274  if (debug_ts) {
1275  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1276  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1277  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1278  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1279  }
1280 
1281  frame_size = pkt.size;
1282  output_packet(of, &pkt, ost);
1283 
1284  /* if two pass, output log */
1285  if (ost->logfile && enc->stats_out) {
1286  fprintf(ost->logfile, "%s", enc->stats_out);
1287  }
1288  }
1289  }
1290  ost->sync_opts++;
1291  /*
1292  * For video, number of frames in == number of packets out.
1293  * But there may be reordering, so we can't throw away frames on encoder
1294  * flush; we need to limit them here, before they go into the encoder.
1295  */
1296  ost->frame_number++;
1297 
1298  if (vstats_filename && frame_size)
1299  do_video_stats(ost, frame_size);
1300  }
1301 
1302  if (!ost->last_frame)
1303  ost->last_frame = av_frame_alloc();
1304  av_frame_unref(ost->last_frame);
1305  if (next_picture && ost->last_frame)
1306  av_frame_ref(ost->last_frame, next_picture);
1307  else
1308  av_frame_free(&ost->last_frame);
1309 
1310  return;
1311 error:
1312  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1313  exit_program(1);
1314 }
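/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * A minimal restatement of the VSYNC_CFR branch of do_video_out() above:
 * delta0 is the drift between the incoming frame and the output clock, and
 * delta = delta0 + duration, both measured in encoder ticks.  A frame that is
 * more than about one tick early is dropped, a frame that is more than about
 * one tick late is duplicated to fill the gap.  Example: delta = 3.4 and
 * delta0 = 2.4 give nb_frames = 3 and nb0_frames = lrint(1.8) = 2.
 */
static void cfr_frame_count_sketch(double delta, double delta0, double drop_threshold,
                                   int frame_number, int *nb_frames, int *nb0_frames)
{
    *nb_frames  = 1;                         /* default: emit the frame once        */
    *nb0_frames = 0;                         /* extra copies of the previous frame  */

    if (drop_threshold && delta < drop_threshold && frame_number) {
        *nb_frames = 0;                      /* below the -frame_drop_threshold     */
    } else if (delta < -1.1) {
        *nb_frames = 0;                      /* frame arrives too early: drop       */
    } else if (delta > 1.1) {
        *nb_frames = lrint(delta);           /* frame arrives too late: duplicate   */
        if (delta0 > 1.1)
            *nb0_frames = lrint(delta0 - 0.6);
    }
}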
1315 
1316 static double psnr(double d)
1317 {
1318  return -10.0 * log10(d);
1319 }
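/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * psnr() above is always called with error / (width * height * 255 * 255),
 * i.e. the mean squared error normalised by the squared 8-bit peak value, so
 * the result is the usual PSNR in dB.  Worked example: an MSE of 6.5025 on
 * 8-bit samples gives -10 * log10(6.5025 / 65025) = -10 * log10(1e-4) = 40 dB.
 */
static double psnr_from_sse(double sse, int width, int height)
{
    double normalised_mse = sse / ((double)width * height * 255.0 * 255.0);
    return -10.0 * log10(normalised_mse);
}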
1320 
1321 static void do_video_stats(OutputStream *ost, int frame_size)
1322 {
1323  AVCodecContext *enc;
1324  int frame_number;
1325  double ti1, bitrate, avg_bitrate;
1326 
1327  /* this is executed just the first time do_video_stats is called */
1328  if (!vstats_file) {
1329  vstats_file = fopen(vstats_filename, "w");
1330  if (!vstats_file) {
1331  perror("fopen");
1332  exit_program(1);
1333  }
1334  }
1335 
1336  enc = ost->enc_ctx;
1337  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1338  frame_number = ost->st->nb_frames;
1339  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1340  ost->quality / (float)FF_QP2LAMBDA);
1341 
1342  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1343  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1344 
1345  fprintf(vstats_file,"f_size= %6d ", frame_size);
1346  /* compute pts value */
1347  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1348  if (ti1 < 0.01)
1349  ti1 = 0.01;
1350 
1351  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1352  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1353  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1354  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1355  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1356  }
1357 }
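/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * The per-frame bitrate printed by do_video_stats() above is
 * frame_size * 8 / av_q2d(enc->time_base) / 1000, i.e. the frame is assumed to
 * last exactly one encoder tick.  Worked example with hypothetical numbers: a
 * 5000-byte frame with a 1/25 time base gives 5000 * 8 * 25 / 1000 =
 * 1000 kbit/s.
 */
static double per_frame_bitrate_kbps(int frame_size_bytes, AVRational enc_time_base)
{
    return frame_size_bytes * 8 / av_q2d(enc_time_base) / 1000.0;
}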
1358 
1359 static void finish_output_stream(OutputStream *ost)
1360 {
1361  OutputFile *of = output_files[ost->file_index];
1362  int i;
1363 
1365 
1366  if (of->shortest) {
1367  for (i = 0; i < of->ctx->nb_streams; i++)
1368  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1369  }
1370 }
1371 
1372 /**
1373  * Get and encode new output from any of the filtergraphs, without causing
1374  * activity.
1375  *
1376  * @return 0 for success, <0 for severe errors
1377  */
1378 static int reap_filters(int flush)
1379 {
1380  AVFrame *filtered_frame = NULL;
1381  int i;
1382 
1383  /* Reap all buffers present in the buffer sinks */
1384  for (i = 0; i < nb_output_streams; i++) {
1385  OutputStream *ost = output_streams[i];
1386  OutputFile *of = output_files[ost->file_index];
1388  AVCodecContext *enc = ost->enc_ctx;
1389  int ret = 0;
1390 
1391  if (!ost->filter)
1392  continue;
1393  filter = ost->filter->filter;
1394 
1395  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1396  return AVERROR(ENOMEM);
1397  }
1398  filtered_frame = ost->filtered_frame;
1399 
1400  while (1) {
1401  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1402  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1404  if (ret < 0) {
1405  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1407  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1408  } else if (flush && ret == AVERROR_EOF) {
1409  if (filter->inputs[0]->type == AVMEDIA_TYPE_VIDEO)
1410  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1411  }
1412  break;
1413  }
1414  if (ost->finished) {
1415  av_frame_unref(filtered_frame);
1416  continue;
1417  }
1418  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1419  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1420  AVRational tb = enc->time_base;
1421  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1422 
1423  tb.den <<= extra_bits;
1424  float_pts =
1425  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1426  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1427  float_pts /= 1 << extra_bits;
1428  // avoid exact midpoints to reduce the chance of rounding differences; this can be removed once the fps code is changed to work with integers
1429  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1430 
1431  filtered_frame->pts =
1432  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1433  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1434  }
1435  //if (ost->source_index >= 0)
1436  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1437 
1438  switch (filter->inputs[0]->type) {
1439  case AVMEDIA_TYPE_VIDEO:
1440  if (!ost->frame_aspect_ratio.num)
1441  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1442 
1443  if (debug_ts) {
1444  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1445  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1446  float_pts,
1447  enc->time_base.num, enc->time_base.den);
1448  }
1449 
1450  do_video_out(of, ost, filtered_frame, float_pts);
1451  break;
1452  case AVMEDIA_TYPE_AUDIO:
1453  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1454  enc->channels != av_frame_get_channels(filtered_frame)) {
1456  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1457  break;
1458  }
1459  do_audio_out(of, ost, filtered_frame);
1460  break;
1461  default:
1462  // TODO support subtitle filters
1463  av_assert0(0);
1464  }
1465 
1466  av_frame_unref(filtered_frame);
1467  }
1468  }
1469 
1470  return 0;
1471 }
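/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * The float_pts computation in reap_filters() above rescales into a time base
 * whose denominator is widened by up to 16 extra bits and divides the shift
 * out afterwards: a plain integer av_rescale_q() into enc->time_base would
 * round to whole encoder ticks, whereas the widened base keeps a fractional
 * part of the tick for the frame-rate conversion in do_video_out().  A minimal
 * illustration of the idea:
 */
static double rescale_keep_fraction(int64_t pts, AVRational in_tb, AVRational out_tb)
{
    int        extra_bits = av_clip(29 - av_log2(out_tb.den), 0, 16);
    AVRational fine_tb    = { out_tb.num, out_tb.den << extra_bits };

    return av_rescale_q(pts, in_tb, fine_tb) / (double)(1 << extra_bits);
}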
1472 
1473 static void print_final_stats(int64_t total_size)
1474 {
1475  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1476  uint64_t subtitle_size = 0;
1477  uint64_t data_size = 0;
1478  float percent = -1.0;
1479  int i, j;
1480  int pass1_used = 1;
1481 
1482  for (i = 0; i < nb_output_streams; i++) {
1483  OutputStream *ost = output_streams[i];
1484  switch (ost->enc_ctx->codec_type) {
1485  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1486  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1487  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1488  default: other_size += ost->data_size; break;
1489  }
1490  extra_size += ost->enc_ctx->extradata_size;
1491  data_size += ost->data_size;
1494  pass1_used = 0;
1495  }
1496 
1497  if (data_size && total_size>0 && total_size >= data_size)
1498  percent = 100.0 * (total_size - data_size) / data_size;
1499 
1500  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1501  video_size / 1024.0,
1502  audio_size / 1024.0,
1503  subtitle_size / 1024.0,
1504  other_size / 1024.0,
1505  extra_size / 1024.0);
1506  if (percent >= 0.0)
1507  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1508  else
1509  av_log(NULL, AV_LOG_INFO, "unknown");
1510  av_log(NULL, AV_LOG_INFO, "\n");
1511 
1512  /* print verbose per-stream stats */
1513  for (i = 0; i < nb_input_files; i++) {
1514  InputFile *f = input_files[i];
1515  uint64_t total_packets = 0, total_size = 0;
1516 
1517  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1518  i, f->ctx->filename);
1519 
1520  for (j = 0; j < f->nb_streams; j++) {
1521  InputStream *ist = input_streams[f->ist_index + j];
1522  enum AVMediaType type = ist->dec_ctx->codec_type;
1523 
1524  total_size += ist->data_size;
1525  total_packets += ist->nb_packets;
1526 
1527  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1528  i, j, media_type_string(type));
1529  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1530  ist->nb_packets, ist->data_size);
1531 
1532  if (ist->decoding_needed) {
1533  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1534  ist->frames_decoded);
1535  if (type == AVMEDIA_TYPE_AUDIO)
1536  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1537  av_log(NULL, AV_LOG_VERBOSE, "; ");
1538  }
1539 
1540  av_log(NULL, AV_LOG_VERBOSE, "\n");
1541  }
1542 
1543  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1544  total_packets, total_size);
1545  }
1546 
1547  for (i = 0; i < nb_output_files; i++) {
1548  OutputFile *of = output_files[i];
1549  uint64_t total_packets = 0, total_size = 0;
1550 
1551  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1552  i, of->ctx->filename);
1553 
1554  for (j = 0; j < of->ctx->nb_streams; j++) {
1555  OutputStream *ost = output_streams[of->ost_index + j];
1556  enum AVMediaType type = ost->enc_ctx->codec_type;
1557 
1558  total_size += ost->data_size;
1559  total_packets += ost->packets_written;
1560 
1561  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1562  i, j, media_type_string(type));
1563  if (ost->encoding_needed) {
1564  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1565  ost->frames_encoded);
1566  if (type == AVMEDIA_TYPE_AUDIO)
1567  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1568  av_log(NULL, AV_LOG_VERBOSE, "; ");
1569  }
1570 
1571  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1572  ost->packets_written, ost->data_size);
1573 
1574  av_log(NULL, AV_LOG_VERBOSE, "\n");
1575  }
1576 
1577  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1578  total_packets, total_size);
1579  }
1580  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1581  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1582  if (pass1_used) {
1583  av_log(NULL, AV_LOG_WARNING, "\n");
1584  } else {
1585  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1586  }
1587  }
1588 }
1589 
1590 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1591 {
1592  char buf[1024];
1593  AVBPrint buf_script;
1594  OutputStream *ost;
1595  AVFormatContext *oc;
1596  int64_t total_size;
1597  AVCodecContext *enc;
1598  int frame_number, vid, i;
1599  double bitrate;
1600  double speed;
1601  int64_t pts = INT64_MIN + 1;
1602  static int64_t last_time = -1;
1603  static int qp_histogram[52];
1604  int hours, mins, secs, us;
1605  int ret;
1606  float t;
1607 
1608  if (!print_stats && !is_last_report && !progress_avio)
1609  return;
1610 
1611  if (!is_last_report) {
1612  if (last_time == -1) {
1613  last_time = cur_time;
1614  return;
1615  }
1616  if ((cur_time - last_time) < 500000)
1617  return;
1618  last_time = cur_time;
1619  }
1620 
1621  t = (cur_time-timer_start) / 1000000.0;
1622 
1623 
1624  oc = output_files[0]->ctx;
1625 
1626  total_size = avio_size(oc->pb);
1627  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1628  total_size = avio_tell(oc->pb);
1629 
1630  buf[0] = '\0';
1631  vid = 0;
1632  av_bprint_init(&buf_script, 0, 1);
1633  for (i = 0; i < nb_output_streams; i++) {
1634  float q = -1;
1635  ost = output_streams[i];
1636  enc = ost->enc_ctx;
1637  if (!ost->stream_copy)
1638  q = ost->quality / (float) FF_QP2LAMBDA;
1639 
1640  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1641  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1642  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1643  ost->file_index, ost->index, q);
1644  }
1645  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1646  float fps;
1647 
1648  frame_number = ost->frame_number;
1649  fps = t > 1 ? frame_number / t : 0;
1650  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1651  frame_number, fps < 9.95, fps, q);
1652  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1653  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1654  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1655  ost->file_index, ost->index, q);
1656  if (is_last_report)
1657  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1658  if (qp_hist) {
1659  int j;
1660  int qp = lrintf(q);
1661  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1662  qp_histogram[qp]++;
1663  for (j = 0; j < 32; j++)
1664  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", av_log2(qp_histogram[j] + 1));
1665  }
1666 
1667  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1668  int j;
1669  double error, error_sum = 0;
1670  double scale, scale_sum = 0;
1671  double p;
1672  char type[3] = { 'Y','U','V' };
1673  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1674  for (j = 0; j < 3; j++) {
1675  if (is_last_report) {
1676  error = enc->error[j];
1677  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1678  } else {
1679  error = ost->error[j];
1680  scale = enc->width * enc->height * 255.0 * 255.0;
1681  }
1682  if (j)
1683  scale /= 4;
1684  error_sum += error;
1685  scale_sum += scale;
1686  p = psnr(error / scale);
1687  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1688  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1689  ost->file_index, ost->index, type[j] | 32, p);
1690  }
1691  p = psnr(error_sum / scale_sum);
1692  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1693  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1694  ost->file_index, ost->index, p);
1695  }
1696  vid = 1;
1697  }
1698  /* compute min output value */
1700  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1701  ost->st->time_base, AV_TIME_BASE_Q));
1702  if (is_last_report)
1703  nb_frames_drop += ost->last_dropped;
1704  }
1705 
1706  secs = FFABS(pts) / AV_TIME_BASE;
1707  us = FFABS(pts) % AV_TIME_BASE;
1708  mins = secs / 60;
1709  secs %= 60;
1710  hours = mins / 60;
1711  mins %= 60;
1712 
1713  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1714  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1715 
1716  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1717  "size=N/A time=");
1718  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1719  "size=%8.0fkB time=", total_size / 1024.0);
1720  if (pts < 0)
1721  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1722  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1723  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1724  (100 * us) / AV_TIME_BASE);
1725 
1726  if (bitrate < 0) {
1727  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1728  av_bprintf(&buf_script, "bitrate=N/A\n");
1729  }else{
1730  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1731  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1732  }
1733 
1734  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1735  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1736  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1737  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1738  hours, mins, secs, us);
1739 
1741  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1742  nb_frames_dup, nb_frames_drop);
1743  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1744  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1745 
1746  if (speed < 0) {
1747  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=N/A");
1748  av_bprintf(&buf_script, "speed=N/A\n");
1749  } else {
1750  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf)," speed=%4.3gx", speed);
1751  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1752  }
1753 
1754  if (print_stats || is_last_report) {
1755  const char end = is_last_report ? '\n' : '\r';
1756  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1757  fprintf(stderr, "%s %c", buf, end);
1758  } else
1759  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1760 
1761  fflush(stderr);
1762  }
1763 
1764  if (progress_avio) {
1765  av_bprintf(&buf_script, "progress=%s\n",
1766  is_last_report ? "end" : "continue");
1767  avio_write(progress_avio, buf_script.str,
1768  FFMIN(buf_script.len, buf_script.size - 1));
1769  avio_flush(progress_avio);
1770  av_bprint_finalize(&buf_script, NULL);
1771  if (is_last_report) {
1772  if ((ret = avio_closep(&progress_avio)) < 0)
1774  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1775  }
1776  }
1777 
1778  if (is_last_report)
1779  print_final_stats(total_size);
1780 }
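/* --- Editor's sketch (not part of ffmpeg.c) --------------------------------
 * The time and bitrate arithmetic of print_report() above in one place: the
 * current output pts is in AV_TIME_BASE units (microseconds), so whole seconds
 * and the leftover microseconds are split off with / and %, and the bitrate is
 * bytes * 8 over milliseconds, which yields kbit/s.  Worked example with
 * hypothetical numbers: 1 MiB written after 10 s is
 * 1048576 * 8 / (10000000 / 1000.0) ~= 838.9 kbit/s.
 */
static void split_out_time(int64_t pts_us, int *hours, int *mins, int *secs, int *hundredths)
{
    int64_t a  = FFABS(pts_us);
    int     s  = (int)(a / AV_TIME_BASE);      /* whole seconds         */
    int     us = (int)(a % AV_TIME_BASE);      /* leftover microseconds */

    *secs       = s % 60;
    *mins       = (s / 60) % 60;
    *hours      = s / 3600;
    *hundredths = (int)(100LL * us / AV_TIME_BASE);
}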
1781 
1782 static void flush_encoders(void)
1783 {
1784  int i, ret;
1785 
1786  for (i = 0; i < nb_output_streams; i++) {
1787  OutputStream *ost = output_streams[i];
1788  AVCodecContext *enc = ost->enc_ctx;
1789  OutputFile *of = output_files[ost->file_index];
1790  int stop_encoding = 0;
1791 
1792  if (!ost->encoding_needed)
1793  continue;
1794 
1795  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1796  continue;
1797 #if FF_API_LAVF_FMT_RAWPICTURE
1798  if (enc->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == AV_CODEC_ID_RAWVIDEO)
1799  continue;
1800 #endif
1801 
1802  if (enc->codec_type != AVMEDIA_TYPE_AUDIO && enc->codec_type != AVMEDIA_TYPE_VIDEO)
1803  continue;
1804 
1805  avcodec_send_frame(enc, NULL);
1806 
1807  for (;;) {
1808  const char *desc = NULL;
1809 
1810  switch (enc->codec_type) {
1811  case AVMEDIA_TYPE_AUDIO:
1812  desc = "audio";
1813  break;
1814  case AVMEDIA_TYPE_VIDEO:
1815  desc = "video";
1816  break;
1817  default:
1818  av_assert0(0);
1819  }
1820 
1821  if (1) {
1822  AVPacket pkt;
1823  int pkt_size;
1824  av_init_packet(&pkt);
1825  pkt.data = NULL;
1826  pkt.size = 0;
1827 
1829  ret = avcodec_receive_packet(enc, &pkt);
1830  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1831  if (ret < 0 && ret != AVERROR_EOF) {
1832  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1833  desc,
1834  av_err2str(ret));
1835  exit_program(1);
1836  }
1837  if (ost->logfile && enc->stats_out) {
1838  fprintf(ost->logfile, "%s", enc->stats_out);
1839  }
1840  if (ret == AVERROR_EOF) {
1841  stop_encoding = 1;
1842  break;
1843  }
1844  if (ost->finished & MUXER_FINISHED) {
1845  av_packet_unref(&pkt);
1846  continue;
1847  }
1848  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1849  pkt_size = pkt.size;
1850  output_packet(of, &pkt, ost);
1852  do_video_stats(ost, pkt_size);
1853  }
1854  }
1855 
1856  if (stop_encoding)
1857  break;
1858  }
1859  }
1860 }
1861 
1862 /*
1863  * Check whether a packet from ist should be written into ost at this time
1864  */
1865 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1866 {
1867  OutputFile *of = output_files[ost->file_index];
1868  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1869 
1870  if (ost->source_index != ist_index)
1871  return 0;
1872 
1873  if (ost->finished)
1874  return 0;
1875 
1876  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1877  return 0;
1878 
1879  return 1;
1880 }
1881 
1882 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1883 {
1884  OutputFile *of = output_files[ost->file_index];
1885  InputFile *f = input_files [ist->file_index];
1886  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1887  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1888  AVPicture pict;
1889  AVPacket opkt;
1890 
1891  av_init_packet(&opkt);
1892 
1893  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1895  return;
1896 
1897  if (!ost->frame_number && !ost->copy_prior_start) {
1898  int64_t comp_start = start_time;
1899  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
1900  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
1901  if (pkt->pts == AV_NOPTS_VALUE ?
1902  ist->pts < comp_start :
1903  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
1904  return;
1905  }
1906 
1907  if (of->recording_time != INT64_MAX &&
1908  ist->pts >= of->recording_time + start_time) {
1909  close_output_stream(ost);
1910  return;
1911  }
1912 
1913  if (f->recording_time != INT64_MAX) {
1914  start_time = f->ctx->start_time;
1915  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
1916  start_time += f->start_time;
1917  if (ist->pts >= f->recording_time + start_time) {
1918  close_output_stream(ost);
1919  return;
1920  }
1921  }
1922 
1923  /* force the input stream PTS */
1924  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1925  ost->sync_opts++;
1926 
1927  if (pkt->pts != AV_NOPTS_VALUE)
1928  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1929  else
1930  opkt.pts = AV_NOPTS_VALUE;
1931 
1932  if (pkt->dts == AV_NOPTS_VALUE)
1933  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1934  else
1935  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1936  opkt.dts -= ost_tb_start_time;
1937 
1938  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1939  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1940  if(!duration)
1941  duration = ist->dec_ctx->frame_size;
1942  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1943  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1944  ost->st->time_base) - ost_tb_start_time;
1945  }
1946 
1947  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1948  opkt.flags = pkt->flags;
1949  // FIXME: remove the following 2 lines; they shall be replaced by the bitstream filters
1950  if ( ost->st->codecpar->codec_id != AV_CODEC_ID_H264
1951  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG1VIDEO
1952  && ost->st->codecpar->codec_id != AV_CODEC_ID_MPEG2VIDEO
1953  && ost->st->codecpar->codec_id != AV_CODEC_ID_VC1
1954  ) {
1955  int ret = av_parser_change(ost->parser, ost->parser_avctx,
1956  &opkt.data, &opkt.size,
1957  pkt->data, pkt->size,
1958  pkt->flags & AV_PKT_FLAG_KEY);
1959  if (ret < 0) {
1960  av_log(NULL, AV_LOG_FATAL, "av_parser_change failed: %s\n",
1961  av_err2str(ret));
1962  exit_program(1);
1963  }
1964  if (ret) {
1965  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1966  if (!opkt.buf)
1967  exit_program(1);
1968  }
1969  } else {
1970  opkt.data = pkt->data;
1971  opkt.size = pkt->size;
1972  }
1973  av_copy_packet_side_data(&opkt, pkt);
1974 
1975 #if FF_API_LAVF_FMT_RAWPICTURE
1976  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1977  ost->st->codecpar->codec_id == AV_CODEC_ID_RAWVIDEO &&
1978  (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1979  /* store AVPicture in AVPacket, as expected by the output format */
1980  int ret = avpicture_fill(&pict, opkt.data, ost->st->codecpar->format, ost->st->codecpar->width, ost->st->codecpar->height);
1981  if (ret < 0) {
1982  av_log(NULL, AV_LOG_FATAL, "avpicture_fill failed: %s\n",
1983  av_err2str(ret));
1984  exit_program(1);
1985  }
1986  opkt.data = (uint8_t *)&pict;
1987  opkt.size = sizeof(AVPicture);
1988  opkt.flags |= AV_PKT_FLAG_KEY;
1989  }
1990 #endif
1991 
1992  output_packet(of, &opkt, ost);
1993 }
1994 
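 /* If the decoder did not report a channel layout, fall back to the default
  * layout for the reported channel count (bounded by -guess_layout_max) and
  * warn that the layout was guessed. Returns 0 when no usable layout exists. */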
1995  int guess_input_channel_layout(InputStream *ist)
1996 {
1997  AVCodecContext *dec = ist->dec_ctx;
1998 
1999  if (!dec->channel_layout) {
2000  char layout_name[256];
2001 
2002  if (dec->channels > ist->guess_layout_max)
2003  return 0;
2004  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2005  if (!dec->channel_layout)
2006  return 0;
2007  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2008  dec->channels, dec->channel_layout);
2009  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2010  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2011  }
2012  return 1;
2013 }
2014 
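 /* Book-keeping after each decode call: update the decode error counters and,
  * when -xerror is set, abort on hard decode failures or corrupt frames. */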
2015 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2016 {
2017  if (*got_output || ret<0)
2018  decode_error_stat[ret<0] ++;
2019 
2020  if (ret < 0 && exit_on_error)
2021  exit_program(1);
2022 
2023  if (exit_on_error && *got_output && ist) {
2024  if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2025  av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
2026  exit_program(1);
2027  }
2028  }
2029 }
2030 
2031 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2032 // There is the following difference: if you got a frame, you must call
2033 // it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
2034 // (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
2035 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2036 {
2037  int ret;
2038 
2039  *got_frame = 0;
2040 
2041  if (pkt) {
2042  ret = avcodec_send_packet(avctx, pkt);
2043  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2044  // decoded frames with avcodec_receive_frame() until done.
2045  if (ret < 0 && ret != AVERROR_EOF)
2046  return ret;
2047  }
2048 
2049  ret = avcodec_receive_frame(avctx, frame);
2050  if (ret < 0 && ret != AVERROR(EAGAIN))
2051  return ret;
2052  if (ret >= 0)
2053  *got_frame = 1;
2054 
2055  return 0;
2056 }
2057 
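 /* Decode one audio packet (or drain the decoder when pkt is NULL), derive a
  * timestamp for the decoded frame, reconfigure the filtergraphs if the sample
  * format/rate/layout changed, and feed the frame to every attached filter. */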
2058 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
2059 {
2060  AVFrame *decoded_frame, *f;
2061  AVCodecContext *avctx = ist->dec_ctx;
2062  int i, ret, err = 0, resample_changed;
2063  AVRational decoded_frame_tb;
2064 
2065  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2066  return AVERROR(ENOMEM);
2067  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2068  return AVERROR(ENOMEM);
2069  decoded_frame = ist->decoded_frame;
2070 
2071  update_benchmark(NULL);
2072  ret = decode(avctx, decoded_frame, got_output, pkt);
2073  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2074 
2075  if (ret >= 0 && avctx->sample_rate <= 0) {
2076  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2077  ret = AVERROR_INVALIDDATA;
2078  }
2079 
2080  if (ret != AVERROR_EOF)
2081  check_decode_result(ist, got_output, ret);
2082 
2083  if (!*got_output || ret < 0)
2084  return ret;
2085 
2086  ist->samples_decoded += decoded_frame->nb_samples;
2087  ist->frames_decoded++;
2088 
2089 #if 1
2090  /* increment next_dts to use for the case where the input stream does not
2091  have timestamps or there are multiple frames in the packet */
2092  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2093  avctx->sample_rate;
2094  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2095  avctx->sample_rate;
2096 #endif
2097 
2098  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
2099  ist->resample_channels != avctx->channels ||
2100  ist->resample_channel_layout != decoded_frame->channel_layout ||
2101  ist->resample_sample_rate != decoded_frame->sample_rate;
2102  if (resample_changed) {
2103  char layout1[64], layout2[64];
2104 
2105  if (!guess_input_channel_layout(ist)) {
2106  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
2107  "layout for Input Stream #%d.%d\n", ist->file_index,
2108  ist->st->index);
2109  exit_program(1);
2110  }
2111  decoded_frame->channel_layout = avctx->channel_layout;
2112 
2113  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
2114  ist->resample_channel_layout);
2115  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
2116  decoded_frame->channel_layout);
2117 
2118  av_log(NULL, AV_LOG_INFO,
2119  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
2120  ist->file_index, ist->st->index,
2121  ist->resample_sample_rate, av_get_sample_fmt_name(ist->resample_sample_fmt),
2122  ist->resample_channels, layout1,
2123  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
2124  avctx->channels, layout2);
2125 
2126  ist->resample_sample_fmt = decoded_frame->format;
2127  ist->resample_sample_rate = decoded_frame->sample_rate;
2128  ist->resample_channel_layout = decoded_frame->channel_layout;
2129  ist->resample_channels = avctx->channels;
2130 
2131  for (i = 0; i < nb_filtergraphs; i++)
2132  if (ist_in_filtergraph(filtergraphs[i], ist)) {
2133  FilterGraph *fg = filtergraphs[i];
2134  if (configure_filtergraph(fg) < 0) {
2135  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2136  exit_program(1);
2137  }
2138  }
2139  }
2140 
2141  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2142  decoded_frame_tb = ist->st->time_base;
2143  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2144  decoded_frame->pts = pkt->pts;
2145  decoded_frame_tb = ist->st->time_base;
2146  }else {
2147  decoded_frame->pts = ist->dts;
2148  decoded_frame_tb = AV_TIME_BASE_Q;
2149  }
2150  if (decoded_frame->pts != AV_NOPTS_VALUE)
2151  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2152  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2153  (AVRational){1, avctx->sample_rate});
2154  ist->nb_samples = decoded_frame->nb_samples;
2155  for (i = 0; i < ist->nb_filters; i++) {
2156  if (i < ist->nb_filters - 1) {
2157  f = ist->filter_frame;
2158  err = av_frame_ref(f, decoded_frame);
2159  if (err < 0)
2160  break;
2161  } else
2162  f = decoded_frame;
2163  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
2164  AV_BUFFERSRC_FLAG_PUSH);
2165  if (err == AVERROR_EOF)
2166  err = 0; /* ignore */
2167  if (err < 0)
2168  break;
2169  }
2170  decoded_frame->pts = AV_NOPTS_VALUE;
2171 
2172  av_frame_unref(ist->filter_frame);
2173  av_frame_unref(decoded_frame);
2174  return err < 0 ? err : ret;
2175 }
2176 
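 /* Video counterpart of decode_audio(): decode or drain one packet, recover a
  * best-effort timestamp (falling back to buffered DTS values at EOF), handle
  * hwaccel frame retrieval and resolution/pixel-format changes, then push the
  * frame into the attached filtergraphs. */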
2177 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
2178 {
2179  AVFrame *decoded_frame, *f;
2180  int i, ret = 0, err = 0, resample_changed;
2181  int64_t best_effort_timestamp;
2182  int64_t dts = AV_NOPTS_VALUE;
2183  AVRational *frame_sample_aspect;
2184  AVPacket avpkt;
2185 
2186  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2187  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2188  // skip the packet.
2189  if (!eof && pkt && pkt->size == 0)
2190  return 0;
2191 
2192  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2193  return AVERROR(ENOMEM);
2194  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2195  return AVERROR(ENOMEM);
2196  decoded_frame = ist->decoded_frame;
2197  if (ist->dts != AV_NOPTS_VALUE)
2198  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2199  if (pkt) {
2200  avpkt = *pkt;
2201  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2202  }
2203 
2204  // The old code used to set dts on the drain packet, which does not work
2205  // with the new API anymore.
2206  if (eof) {
2207  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2208  if (!new)
2209  return AVERROR(ENOMEM);
2210  ist->dts_buffer = new;
2211  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2212  }
2213 
2214  update_benchmark(NULL);
2215  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2216  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2217 
2218  // The following line may be required in some cases where there is no parser
2219  // or the parser does not set has_b_frames correctly
2220  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2221  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2222  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2223  } else
2224  av_log(ist->dec_ctx, AV_LOG_WARNING,
2225  "video_delay is larger in decoder than demuxer %d > %d.\n"
2226  "If you want to help, upload a sample "
2227  "of this file to ftp://upload.ffmpeg.org/incoming/ "
2228  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)",
2229  ist->dec_ctx->has_b_frames,
2230  ist->st->codecpar->video_delay);
2231  }
2232 
2233  if (ret != AVERROR_EOF)
2234  check_decode_result(ist, got_output, ret);
2235 
2236  if (*got_output && ret >= 0) {
2237  if (ist->dec_ctx->width != decoded_frame->width ||
2238  ist->dec_ctx->height != decoded_frame->height ||
2239  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2240  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2241  decoded_frame->width,
2242  decoded_frame->height,
2243  decoded_frame->format,
2244  ist->dec_ctx->width,
2245  ist->dec_ctx->height,
2246  ist->dec_ctx->pix_fmt);
2247  }
2248  }
2249 
2250  if (!*got_output || ret < 0)
2251  return ret;
2252 
2253  if(ist->top_field_first>=0)
2254  decoded_frame->top_field_first = ist->top_field_first;
2255 
2256  ist->frames_decoded++;
2257 
2258  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2259  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2260  if (err < 0)
2261  goto fail;
2262  }
2263  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2264 
2265  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2266 
2267  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2268  best_effort_timestamp = ist->dts_buffer[0];
2269 
2270  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2271  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2272  ist->nb_dts_buffer--;
2273  }
2274 
2275  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2276  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2277 
2278  if (ts != AV_NOPTS_VALUE)
2279  ist->next_pts = ist->pts = ts;
2280  }
2281 
2282  if (debug_ts) {
2283  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2284  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2285  ist->st->index, av_ts2str(decoded_frame->pts),
2286  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2287  best_effort_timestamp,
2288  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2289  decoded_frame->key_frame, decoded_frame->pict_type,
2290  ist->st->time_base.num, ist->st->time_base.den);
2291  }
2292 
2293  if (ist->st->sample_aspect_ratio.num)
2294  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2295 
2296  resample_changed = ist->resample_width != decoded_frame->width ||
2297  ist->resample_height != decoded_frame->height ||
2298  ist->resample_pix_fmt != decoded_frame->format;
2299  if (resample_changed) {
2300  av_log(NULL, AV_LOG_INFO,
2301  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2302  ist->file_index, ist->st->index,
2303  ist->resample_width, ist->resample_height, av_get_pix_fmt_name(ist->resample_pix_fmt),
2304  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2305 
2306  ist->resample_width = decoded_frame->width;
2307  ist->resample_height = decoded_frame->height;
2308  ist->resample_pix_fmt = decoded_frame->format;
2309 
2310  for (i = 0; i < nb_filtergraphs; i++) {
2311  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2312  configure_filtergraph(filtergraphs[i]) < 0) {
2313  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2314  exit_program(1);
2315  }
2316  }
2317  }
2318 
2319  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2320  for (i = 0; i < ist->nb_filters; i++) {
2321  if (!frame_sample_aspect->num)
2322  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2323 
2324  if (i < ist->nb_filters - 1) {
2325  f = ist->filter_frame;
2326  err = av_frame_ref(f, decoded_frame);
2327  if (err < 0)
2328  break;
2329  } else
2330  f = decoded_frame;
2331  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2332  if (err == AVERROR_EOF) {
2333  err = 0; /* ignore */
2334  } else if (err < 0) {
2335  av_log(NULL, AV_LOG_FATAL,
2336  "Failed to inject frame into filter network: %s\n", av_err2str(err));
2337  exit_program(1);
2338  }
2339  }
2340 
2341 fail:
2342  av_frame_unref(ist->filter_frame);
2343  av_frame_unref(decoded_frame);
2344  return err < 0 ? err : ret;
2345 }
2346 
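 /* Decode one subtitle packet, optionally fix overlapping durations
  * (-fix_sub_duration), update the sub2video state and encode the result for
  * every subtitle output stream fed by this input. */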
2347 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2348 {
2349  AVSubtitle subtitle;
2350  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2351  &subtitle, got_output, pkt);
2352 
2353  check_decode_result(NULL, got_output, ret);
2354 
2355  if (ret < 0 || !*got_output) {
2356  if (!pkt->size)
2357  sub2video_flush(ist);
2358  return ret;
2359  }
2360 
2361  if (ist->fix_sub_duration) {
2362  int end = 1;
2363  if (ist->prev_sub.got_output) {
2364  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2365  1000, AV_TIME_BASE);
2366  if (end < ist->prev_sub.subtitle.end_display_time) {
2367  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2368  "Subtitle duration reduced from %d to %d%s\n",
2369  ist->prev_sub.subtitle.end_display_time, end,
2370  end <= 0 ? ", dropping it" : "");
2371  ist->prev_sub.subtitle.end_display_time = end;
2372  }
2373  }
2374  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2375  FFSWAP(int, ret, ist->prev_sub.ret);
2376  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2377  if (end <= 0)
2378  goto out;
2379  }
2380 
2381  if (!*got_output)
2382  return ret;
2383 
2384  sub2video_update(ist, &subtitle);
2385 
2386  if (!subtitle.num_rects)
2387  goto out;
2388 
2389  ist->frames_decoded++;
2390 
2391  for (i = 0; i < nb_output_streams; i++) {
2392  OutputStream *ost = output_streams[i];
2393 
2394  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2395  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2396  continue;
2397 
2398  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
2399  }
2400 
2401 out:
2402  avsubtitle_free(&subtitle);
2403  return ret;
2404 }
2405 
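 /* Signal EOF to every buffer source fed by this input stream so the attached
  * filtergraphs can flush. */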
2406  static int send_filter_eof(InputStream *ist)
2407 {
2408  int i, ret;
2409  for (i = 0; i < ist->nb_filters; i++) {
2410  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
2411  if (ret < 0)
2412  return ret;
2413  }
2414  return 0;
2415 }
2416 
2417 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2418 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
2419 {
2420  int ret = 0, i;
2421  int repeating = 0;
2422  int eof_reached = 0;
2423 
2424  AVPacket avpkt;
2425  if (!ist->saw_first_ts) {
2426  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2427  ist->pts = 0;
2428  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2429  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2430  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2431  }
2432  ist->saw_first_ts = 1;
2433  }
2434 
2435  if (ist->next_dts == AV_NOPTS_VALUE)
2436  ist->next_dts = ist->dts;
2437  if (ist->next_pts == AV_NOPTS_VALUE)
2438  ist->next_pts = ist->pts;
2439 
2440  if (!pkt) {
2441  /* EOF handling */
2442  av_init_packet(&avpkt);
2443  avpkt.data = NULL;
2444  avpkt.size = 0;
2445  } else {
2446  avpkt = *pkt;
2447  }
2448 
2449  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
2450  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2451  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2452  ist->next_pts = ist->pts = ist->dts;
2453  }
2454 
2455  // while we have more to decode or while the decoder did output something on EOF
2456  while (ist->decoding_needed) {
2457  int duration = 0;
2458  int got_output = 0;
2459 
2460  ist->pts = ist->next_pts;
2461  ist->dts = ist->next_dts;
2462 
2463  switch (ist->dec_ctx->codec_type) {
2464  case AVMEDIA_TYPE_AUDIO:
2465  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output);
2466  break;
2467  case AVMEDIA_TYPE_VIDEO:
2468  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt);
2469  if (!repeating || !pkt || got_output) {
2470  if (pkt && pkt->duration) {
2471  duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2472  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2473  int ticks = av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2474  duration = ((int64_t)AV_TIME_BASE *
2475  ist->dec_ctx->framerate.den * ticks) /
2476  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2477  }
2478 
2479  if(ist->dts != AV_NOPTS_VALUE && duration) {
2480  ist->next_dts += duration;
2481  }else
2482  ist->next_dts = AV_NOPTS_VALUE;
2483  }
2484 
2485  if (got_output)
2486  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2487  break;
2488  case AVMEDIA_TYPE_SUBTITLE:
2489  if (repeating)
2490  break;
2491  ret = transcode_subtitles(ist, &avpkt, &got_output);
2492  if (!pkt && ret >= 0)
2493  ret = AVERROR_EOF;
2494  break;
2495  default:
2496  return -1;
2497  }
2498 
2499  if (ret == AVERROR_EOF) {
2500  eof_reached = 1;
2501  break;
2502  }
2503 
2504  if (ret < 0) {
2505  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
2506  ist->file_index, ist->st->index, av_err2str(ret));
2507  if (exit_on_error)
2508  exit_program(1);
2509  // Decoding might not terminate if we're draining the decoder, and
2510  // the decoder keeps returning an error.
2511  // This should probably be considered a libavcodec issue.
2512  // Sample: fate-vsynth1-dnxhd-720p-hr-lb
2513  if (!pkt)
2514  eof_reached = 1;
2515  break;
2516  }
2517 
2518  if (!got_output)
2519  break;
2520 
2521  // During draining, we might get multiple output frames in this loop.
2522  // ffmpeg.c does not drain the filter chain on configuration changes,
2523  // which means if we send multiple frames at once to the filters, and
2524  // one of those frames changes configuration, the buffered frames will
2525  // be lost. This can upset certain FATE tests.
2526  // Decode only 1 frame per call on EOF to appease these FATE tests.
2527  // The ideal solution would be to rewrite decoding to use the new
2528  // decoding API in a better way.
2529  if (!pkt)
2530  break;
2531 
2532  repeating = 1;
2533  }
2534 
2535  /* after flushing, send an EOF on all the filter inputs attached to the stream */
2536  /* except when looping we need to flush but not to send an EOF */
2537  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
2538  int ret = send_filter_eof(ist);
2539  if (ret < 0) {
2540  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
2541  exit_program(1);
2542  }
2543  }
2544 
2545  /* handle stream copy */
2546  if (!ist->decoding_needed) {
2547  ist->dts = ist->next_dts;
2548  switch (ist->dec_ctx->codec_type) {
2549  case AVMEDIA_TYPE_AUDIO:
2550  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2551  ist->dec_ctx->sample_rate;
2552  break;
2553  case AVMEDIA_TYPE_VIDEO:
2554  if (ist->framerate.num) {
2555  // TODO: Remove work-around for c99-to-c89 issue 7
2556  AVRational time_base_q = AV_TIME_BASE_Q;
2557  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2558  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2559  } else if (pkt->duration) {
2560  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2561  } else if(ist->dec_ctx->framerate.num != 0) {
2562  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2563  ist->next_dts += ((int64_t)AV_TIME_BASE *
2564  ist->dec_ctx->framerate.den * ticks) /
2565  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
2566  }
2567  break;
2568  }
2569  ist->pts = ist->dts;
2570  ist->next_pts = ist->next_dts;
2571  }
2572  for (i = 0; pkt && i < nb_output_streams; i++) {
2573  OutputStream *ost = output_streams[i];
2574 
2575  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2576  continue;
2577 
2578  do_streamcopy(ist, ost, pkt);
2579  }
2580 
2581  return !eof_reached;
2582 }
2583 
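 /* Once every output file has written its header, generate an SDP description
  * for all RTP outputs and print it to stdout or to the file given with
  * -sdp_file. */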
2584 static void print_sdp(void)
2585 {
2586  char sdp[16384];
2587  int i;
2588  int j;
2589  AVIOContext *sdp_pb;
2590  AVFormatContext **avc;
2591 
2592  for (i = 0; i < nb_output_files; i++) {
2593  if (!output_files[i]->header_written)
2594  return;
2595  }
2596 
2597  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2598  if (!avc)
2599  exit_program(1);
2600  for (i = 0, j = 0; i < nb_output_files; i++) {
2601  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2602  avc[j] = output_files[i]->ctx;
2603  j++;
2604  }
2605  }
2606 
2607  if (!j)
2608  goto fail;
2609 
2610  av_sdp_create(avc, j, sdp, sizeof(sdp));
2611 
2612  if (!sdp_filename) {
2613  printf("SDP:\n%s\n", sdp);
2614  fflush(stdout);
2615  } else {
2616  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2617  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2618  } else {
2619  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2620  avio_closep(&sdp_pb);
2621  av_freep(&sdp_filename);
2622  }
2623  }
2624 
2625 fail:
2626  av_freep(&avc);
2627 }
2628 
2629  static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2630 {
2631  int i;
2632  for (i = 0; hwaccels[i].name; i++)
2633  if (hwaccels[i].pix_fmt == pix_fmt)
2634  return &hwaccels[i];
2635  return NULL;
2636 }
2637 
2638  static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2639 {
2640  InputStream *ist = s->opaque;
2641  const enum AVPixelFormat *p;
2642  int ret;
2643 
2644  for (p = pix_fmts; *p != -1; p++) {
2645  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2646  const HWAccel *hwaccel;
2647 
2648  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2649  break;
2650 
2651  hwaccel = get_hwaccel(*p);
2652  if (!hwaccel ||
2653  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2654  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2655  continue;
2656 
2657  ret = hwaccel->init(s);
2658  if (ret < 0) {
2659  if (ist->hwaccel_id == hwaccel->id) {
2660  av_log(NULL, AV_LOG_FATAL,
2661  "%s hwaccel requested for input stream #%d:%d, "
2662  "but cannot be initialized.\n", hwaccel->name,
2663  ist->file_index, ist->st->index);
2664  return AV_PIX_FMT_NONE;
2665  }
2666  continue;
2667  }
2668 
2669  if (ist->hw_frames_ctx) {
2670  s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
2671  if (!s->hw_frames_ctx)
2672  return AV_PIX_FMT_NONE;
2673  }
2674 
2675  ist->active_hwaccel_id = hwaccel->id;
2676  ist->hwaccel_pix_fmt = *p;
2677  break;
2678  }
2679 
2680  return *p;
2681 }
2682 
2683  static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2684 {
2685  InputStream *ist = s->opaque;
2686 
2687  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2688  return ist->hwaccel_get_buffer(s, frame, flags);
2689 
2690  return avcodec_default_get_buffer2(s, frame, flags);
2691 }
2692 
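 /* Open the decoder for one input stream (when decoding is needed): install
  * the hwaccel-aware get_format/get_buffer2 callbacks, apply the per-stream
  * decoder options and reset the predicted timestamps. */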
2693 static int init_input_stream(int ist_index, char *error, int error_len)
2694 {
2695  int ret;
2696  InputStream *ist = input_streams[ist_index];
2697 
2698  if (ist->decoding_needed) {
2699  AVCodec *codec = ist->dec;
2700  if (!codec) {
2701  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2702  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2703  return AVERROR(EINVAL);
2704  }
2705 
2706  ist->dec_ctx->opaque = ist;
2707  ist->dec_ctx->get_format = get_format;
2708  ist->dec_ctx->get_buffer2 = get_buffer;
2709  ist->dec_ctx->thread_safe_callbacks = 1;
2710 
2711  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2712  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2713  (ist->decoding_needed & DECODING_FOR_OST)) {
2714  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2715  if (ist->decoding_needed & DECODING_FOR_FILTER)
2716  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2717  }
2718 
2719  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
2720 
2721  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
2722  * audio, and video decoders such as cuvid or mediacodec */
2723  av_codec_set_pkt_timebase(ist->dec_ctx, ist->st->time_base);
2724 
2725  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2726  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2727  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2728  if (ret == AVERROR_EXPERIMENTAL)
2729  abort_codec_experimental(codec, 0);
2730 
2731  snprintf(error, error_len,
2732  "Error while opening decoder for input stream "
2733  "#%d:%d : %s",
2734  ist->file_index, ist->st->index, av_err2str(ret));
2735  return ret;
2736  }
2737  assert_avoptions(ist->decoder_opts);
2738  }
2739 
2740  ist->next_pts = AV_NOPTS_VALUE;
2741  ist->next_dts = AV_NOPTS_VALUE;
2742 
2743  return 0;
2744 }
2745 
2746  static InputStream *get_input_stream(OutputStream *ost)
2747 {
2748  if (ost->source_index >= 0)
2749  return input_streams[ost->source_index];
2750  return NULL;
2751 }
2752 
2753 static int compare_int64(const void *a, const void *b)
2754 {
2755  return FFDIFFSIGN(*(const int64_t *)a, *(const int64_t *)b);
2756 }
2757 
2758 /* open the muxer when all the streams are initialized */
2759 static int check_init_output_file(OutputFile *of, int file_index)
2760 {
2761  int ret, i;
2762 
2763  for (i = 0; i < of->ctx->nb_streams; i++) {
2764  OutputStream *ost = output_streams[of->ost_index + i];
2765  if (!ost->initialized)
2766  return 0;
2767  }
2768 
2769  of->ctx->interrupt_callback = int_cb;
2770 
2771  ret = avformat_write_header(of->ctx, &of->opts);
2772  if (ret < 0) {
2773  av_log(NULL, AV_LOG_ERROR,
2774  "Could not write header for output file #%d "
2775  "(incorrect codec parameters ?): %s",
2776  file_index, av_err2str(ret));
2777  return ret;
2778  }
2779  //assert_avoptions(of->opts);
2780  of->header_written = 1;
2781 
2782  av_dump_format(of->ctx, file_index, of->ctx->filename, 1);
2783 
2784  if (sdp_filename || want_sdp)
2785  print_sdp();
2786 
2787  /* flush the muxing queues */
2788  for (i = 0; i < of->ctx->nb_streams; i++) {
2789  OutputStream *ost = output_streams[of->ost_index + i];
2790 
2791  while (av_fifo_size(ost->muxing_queue)) {
2792  AVPacket pkt;
2793  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
2794  write_packet(of, &pkt, ost);
2795  }
2796  }
2797 
2798  return 0;
2799 }
2800 
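 /* Chain up the bitstream filters attached to an output stream: propagate the
  * codec parameters and time base through the chain and copy the final
  * parameters back onto the muxer stream. */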
2801  static int init_output_bsfs(OutputStream *ost)
2802 {
2803  AVBSFContext *ctx;
2804  int i, ret;
2805 
2806  if (!ost->nb_bitstream_filters)
2807  return 0;
2808 
2809  for (i = 0; i < ost->nb_bitstream_filters; i++) {
2810  ctx = ost->bsf_ctx[i];
2811 
2812  ret = avcodec_parameters_copy(ctx->par_in,
2813  i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
2814  if (ret < 0)
2815  return ret;
2816 
2817  ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;
2818 
2819  ret = av_bsf_init(ctx);
2820  if (ret < 0) {
2821  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
2822  ost->bsf_ctx[i]->filter->name);
2823  return ret;
2824  }
2825  }
2826 
2827  ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
2828  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
2829  if (ret < 0)
2830  return ret;
2831 
2832  ost->st->time_base = ctx->time_base_out;
2833 
2834  return 0;
2835 }
2836 
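 /* Set up an output stream for streamcopy: duplicate the input codec
  * parameters (plus selected stream properties such as frame rate, sample
  * aspect ratio and side data) onto the output stream without opening an
  * encoder. */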
2837  static int init_output_stream_streamcopy(OutputStream *ost)
2838 {
2839  OutputFile *of = output_files[ost->file_index];
2840  InputStream *ist = get_input_stream(ost);
2841  AVCodecParameters *par_dst = ost->st->codecpar;
2842  AVCodecParameters *par_src = ost->ref_par;
2843  AVRational sar;
2844  int i, ret;
2845  uint64_t extra_size;
2846 
2847  av_assert0(ist && !ost->filter);
2848 
2849  avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
2850  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
2851  if (ret < 0) {
2852  av_log(NULL, AV_LOG_FATAL,
2853  "Error setting up codec context options.\n");
2854  return ret;
2855  }
2856  avcodec_parameters_from_context(par_src, ost->enc_ctx);
2857 
2858  extra_size = (uint64_t)par_src->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE;
2859 
2860  if (extra_size > INT_MAX) {
2861  return AVERROR(EINVAL);
2862  }
2863 
2864  /* if stream_copy is selected, no need to decode or encode */
2865  par_dst->codec_id = par_src->codec_id;
2866  par_dst->codec_type = par_src->codec_type;
2867 
2868  if (!par_dst->codec_tag) {
2869  unsigned int codec_tag;
2870  if (!of->ctx->oformat->codec_tag ||
2871  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_dst->codec_id ||
2872  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag))
2873  par_dst->codec_tag = par_src->codec_tag;
2874  }
2875 
2876  par_dst->bit_rate = par_src->bit_rate;
2877  par_dst->field_order = par_src->field_order;
2878  par_dst->chroma_location = par_src->chroma_location;
2879 
2880  if (par_src->extradata_size) {
2881  par_dst->extradata = av_mallocz(extra_size);
2882  if (!par_dst->extradata) {
2883  return AVERROR(ENOMEM);
2884  }
2885  memcpy(par_dst->extradata, par_src->extradata, par_src->extradata_size);
2886  par_dst->extradata_size = par_src->extradata_size;
2887  }
2888  par_dst->bits_per_coded_sample = par_src->bits_per_coded_sample;
2889  par_dst->bits_per_raw_sample = par_src->bits_per_raw_sample;
2890 
2891  if (!ost->frame_rate.num)
2892  ost->frame_rate = ist->framerate;
2893  ost->st->avg_frame_rate = ost->frame_rate;
2894 
2896  if (ret < 0)
2897  return ret;
2898 
2899  // copy timebase while removing common factors
2900  ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
2901 
2902  if (ist->st->nb_side_data) {
2903  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2904  sizeof(*ist->st->side_data));
2905  if (!ost->st->side_data)
2906  return AVERROR(ENOMEM);
2907 
2908  ost->st->nb_side_data = 0;
2909  for (i = 0; i < ist->st->nb_side_data; i++) {
2910  const AVPacketSideData *sd_src = &ist->st->side_data[i];
2911  AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];
2912 
2913  if (ost->rotate_overridden && sd_src->type == AV_PKT_DATA_DISPLAYMATRIX)
2914  continue;
2915 
2916  sd_dst->data = av_malloc(sd_src->size);
2917  if (!sd_dst->data)
2918  return AVERROR(ENOMEM);
2919  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2920  sd_dst->size = sd_src->size;
2921  sd_dst->type = sd_src->type;
2922  ost->st->nb_side_data++;
2923  }
2924  }
2925 
2926  ost->parser = av_parser_init(par_dst->codec_id);
2927  ost->parser_avctx = avcodec_alloc_context3(NULL);
2928  if (!ost->parser_avctx)
2929  return AVERROR(ENOMEM);
2930 
2931  switch (par_dst->codec_type) {
2932  case AVMEDIA_TYPE_AUDIO:
2933  if (audio_volume != 256) {
2934  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2935  exit_program(1);
2936  }
2937  par_dst->channel_layout = par_src->channel_layout;
2938  par_dst->sample_rate = par_src->sample_rate;
2939  par_dst->channels = par_src->channels;
2940  par_dst->frame_size = par_src->frame_size;
2941  par_dst->block_align = par_src->block_align;
2942  par_dst->initial_padding = par_src->initial_padding;
2943  par_dst->trailing_padding = par_src->trailing_padding;
2944  par_dst->profile = par_src->profile;
2945  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
2946  par_dst->block_align= 0;
2947  if(par_dst->codec_id == AV_CODEC_ID_AC3)
2948  par_dst->block_align= 0;
2949  break;
2950  case AVMEDIA_TYPE_VIDEO:
2951  par_dst->format = par_src->format;
2952  par_dst->color_space = par_src->color_space;
2953  par_dst->color_range = par_src->color_range;
2954  par_dst->color_primaries = par_src->color_primaries;
2955  par_dst->color_trc = par_src->color_trc;
2956  par_dst->width = par_src->width;
2957  par_dst->height = par_src->height;
2958  par_dst->video_delay = par_src->video_delay;
2959  par_dst->profile = par_src->profile;
2960  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2961  sar =
2962  av_mul_q(ost->frame_aspect_ratio,
2963  (AVRational){ par_dst->height, par_dst->width });
2964  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2965  "with stream copy may produce invalid files\n");
2966  }
2967  else if (ist->st->sample_aspect_ratio.num)
2968  sar = ist->st->sample_aspect_ratio;
2969  else
2970  sar = par_src->sample_aspect_ratio;
2971  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
2972  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2973  ost->st->r_frame_rate = ist->st->r_frame_rate;
2974  break;
2975  case AVMEDIA_TYPE_SUBTITLE:
2976  par_dst->width = par_src->width;
2977  par_dst->height = par_src->height;
2978  break;
2979  case AVMEDIA_TYPE_UNKNOWN:
2980  case AVMEDIA_TYPE_DATA:
2981  case AVMEDIA_TYPE_ATTACHMENT:
2982  break;
2983  default:
2984  abort();
2985  }
2986 
2987  return 0;
2988 }
2989 
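 /* Finish setting up one output stream: open the encoder (or run the
  * streamcopy path), copy encoder side data and time base onto the muxer
  * stream, initialize the bitstream filters and, once every stream of the
  * file is ready, write the output header. */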
2990 static int init_output_stream(OutputStream *ost, char *error, int error_len)
2991 {
2992  int ret = 0;
2993 
2994  if (ost->encoding_needed) {
2995  AVCodec *codec = ost->enc;
2996  AVCodecContext *dec = NULL;
2997  InputStream *ist;
2998 
2999  if ((ist = get_input_stream(ost)))
3000  dec = ist->dec_ctx;
3001  if (dec && dec->subtitle_header) {
3002  /* ASS code assumes this buffer is null terminated so add extra byte. */
3003  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3004  if (!ost->enc_ctx->subtitle_header)
3005  return AVERROR(ENOMEM);
3006  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3007  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3008  }
3009  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3010  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3011  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3012  !codec->defaults &&
3013  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3014  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3015  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3016 
3017  if (ost->filter && ost->filter->filter->inputs[0]->hw_frames_ctx) {
3018  ost->enc_ctx->hw_frames_ctx = av_buffer_ref(ost->filter->filter->inputs[0]->hw_frames_ctx);
3019  if (!ost->enc_ctx->hw_frames_ctx)
3020  return AVERROR(ENOMEM);
3021  }
3022 
3023  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3024  if (ret == AVERROR_EXPERIMENTAL)
3025  abort_codec_experimental(codec, 1);
3026  snprintf(error, error_len,
3027  "Error while opening encoder for output stream #%d:%d - "
3028  "maybe incorrect parameters such as bit_rate, rate, width or height",
3029  ost->file_index, ost->index);
3030  return ret;
3031  }
3032  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3033  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3034  av_buffersink_set_frame_size(ost->filter->filter,
3035  ost->enc_ctx->frame_size);
3036  assert_avoptions(ost->encoder_opts);
3037  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3038  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3039  " It takes bits/s as argument, not kbits/s\n");
3040 
3041  ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
3042  if (ret < 0) {
3043  av_log(NULL, AV_LOG_FATAL,
3044  "Error initializing the output stream codec context.\n");
3045  exit_program(1);
3046  }
3047  /*
3048  * FIXME: ost->st->codec shouldn't be needed here anymore.
3049  */
3050  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3051  if (ret < 0)
3052  return ret;
3053 
3054  if (ost->enc_ctx->nb_coded_side_data) {
3055  int i;
3056 
3057  ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
3058  sizeof(*ost->st->side_data));
3059  if (!ost->st->side_data)
3060  return AVERROR(ENOMEM);
3061 
3062  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3063  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3064  AVPacketSideData *sd_dst = &ost->st->side_data[i];
3065 
3066  sd_dst->data = av_malloc(sd_src->size);
3067  if (!sd_dst->data)
3068  return AVERROR(ENOMEM);
3069  memcpy(sd_dst->data, sd_src->data, sd_src->size);
3070  sd_dst->size = sd_src->size;
3071  sd_dst->type = sd_src->type;
3072  ost->st->nb_side_data++;
3073  }
3074  }
3075 
3076  // copy timebase while removing common factors
3077  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3078  ost->st->codec->codec= ost->enc_ctx->codec;
3079  } else if (ost->stream_copy) {
3080  ret = init_output_stream_streamcopy(ost);
3081  if (ret < 0)
3082  return ret;
3083 
3084  /*
3085  * FIXME: will the codec context used by the parser during streamcopy still be needed?
3086  * This should go away with the new parser API.
3087  */
3088  ret = avcodec_parameters_to_context(ost->parser_avctx, ost->st->codecpar);
3089  if (ret < 0)
3090  return ret;
3091  }
3092 
3093  /* initialize bitstream filters for the output stream
3094  * needs to be done here, because the codec id for streamcopy is not
3095  * known until now */
3096  ret = init_output_bsfs(ost);
3097  if (ret < 0)
3098  return ret;
3099 
3100  ost->initialized = 1;
3101 
3102  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3103  if (ret < 0)
3104  return ret;
3105 
3106  return ret;
3107 }
3108 
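 /* Parse the -force_key_frames argument: a comma-separated list of times, or
  * "chapters[+offset]" to force keyframes at chapter start points. The
  * resulting timestamps are rescaled to the encoder time base and sorted. */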
3109 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3110  AVCodecContext *avctx)
3111 {
3112  char *p;
3113  int n = 1, i, size, index = 0;
3114  int64_t t, *pts;
3115 
3116  for (p = kf; *p; p++)
3117  if (*p == ',')
3118  n++;
3119  size = n;
3120  pts = av_malloc_array(size, sizeof(*pts));
3121  if (!pts) {
3122  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3123  exit_program(1);
3124  }
3125 
3126  p = kf;
3127  for (i = 0; i < n; i++) {
3128  char *next = strchr(p, ',');
3129 
3130  if (next)
3131  *next++ = 0;
3132 
3133  if (!memcmp(p, "chapters", 8)) {
3134 
3135  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3136  int j;
3137 
3138  if (avf->nb_chapters > INT_MAX - size ||
3139  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3140  sizeof(*pts)))) {
3141  av_log(NULL, AV_LOG_FATAL,
3142  "Could not allocate forced key frames array.\n");
3143  exit_program(1);
3144  }
3145  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3146  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3147 
3148  for (j = 0; j < avf->nb_chapters; j++) {
3149  AVChapter *c = avf->chapters[j];
3150  av_assert1(index < size);
3151  pts[index++] = av_rescale_q(c->start, c->time_base,
3152  avctx->time_base) + t;
3153  }
3154 
3155  } else {
3156 
3157  t = parse_time_or_die("force_key_frames", p, 1);
3158  av_assert1(index < size);
3159  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3160 
3161  }
3162 
3163  p = next;
3164  }
3165 
3166  av_assert0(index == size);
3167  qsort(pts, size, sizeof(*pts), compare_int64);
3168  ost->forced_kf_count = size;
3169  ost->forced_kf_pts = pts;
3170 }
3171 
3172 static void report_new_stream(int input_index, AVPacket *pkt)
3173 {
3174  InputFile *file = input_files[input_index];
3175  AVStream *st = file->ctx->streams[pkt->stream_index];
3176 
3177  if (pkt->stream_index < file->nb_streams_warn)
3178  return;
3179  av_log(file->ctx, AV_LOG_WARNING,
3180  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3181  av_get_media_type_string(st->codecpar->codec_type),
3182  input_index, pkt->stream_index,
3183  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3184  file->nb_streams_warn = pkt->stream_index + 1;
3185 }
3186 
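 /* Record the encoder name in the output stream's "encoder" metadata tag,
  * using the generic "Lavc" prefix when bitexact output was requested. */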
3187  static void set_encoder_id(OutputFile *of, OutputStream *ost)
3188 {
3189  AVDictionaryEntry *e;
3190 
3191  uint8_t *encoder_string;
3192  int encoder_string_len;
3193  int format_flags = 0;
3194  int codec_flags = 0;
3195 
3196  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3197  return;
3198 
3199  e = av_dict_get(of->opts, "fflags", NULL, 0);
3200  if (e) {
3201  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3202  if (!o)
3203  return;
3204  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3205  }
3206  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3207  if (e) {
3208  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3209  if (!o)
3210  return;
3211  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3212  }
3213 
3214  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3215  encoder_string = av_mallocz(encoder_string_len);
3216  if (!encoder_string)
3217  exit_program(1);
3218 
3219  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3220  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3221  else
3222  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3223  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3224  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3225  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3226 }
3227 
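 /* One-time setup before the main transcode loop: bind complex-filtergraph
  * outputs to streams, derive encoder parameters from the configured filters,
  * open all decoders and encoders, and dump the resulting stream mapping. */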
3228 static int transcode_init(void)
3229 {
3230  int ret = 0, i, j, k;
3231  AVFormatContext *oc;
3232  OutputStream *ost;
3233  InputStream *ist;
3234  char error[1024] = {0};
3235 
3236  for (i = 0; i < nb_filtergraphs; i++) {
3237  FilterGraph *fg = filtergraphs[i];
3238  for (j = 0; j < fg->nb_outputs; j++) {
3239  OutputFilter *ofilter = fg->outputs[j];
3240  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3241  continue;
3242  if (fg->nb_inputs != 1)
3243  continue;
3244  for (k = nb_input_streams-1; k >= 0 ; k--)
3245  if (fg->inputs[0]->ist == input_streams[k])
3246  break;
3247  ofilter->ost->source_index = k;
3248  }
3249  }
3250 
3251  /* init framerate emulation */
3252  for (i = 0; i < nb_input_files; i++) {
3253  InputFile *ifile = input_files[i];
3254  if (ifile->rate_emu)
3255  for (j = 0; j < ifile->nb_streams; j++)
3256  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3257  }
3258 
3259  /* for each output stream, we compute the right encoding parameters */
3260  for (i = 0; i < nb_output_streams; i++) {
3261  ost = output_streams[i];
3262  oc = output_files[ost->file_index]->ctx;
3263  ist = get_input_stream(ost);
3264 
3265  if (ost->attachment_filename)
3266  continue;
3267 
3268  if (ist) {
3269  ost->st->disposition = ist->st->disposition;
3270  } else {
3271  for (j=0; j<oc->nb_streams; j++) {
3272  AVStream *st = oc->streams[j];
3273  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3274  break;
3275  }
3276  if (j == oc->nb_streams)
3277  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3278  ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
3279  ost->st->disposition = AV_DISPOSITION_DEFAULT;
3280  }
3281 
3282  if (!ost->stream_copy) {
3283  AVCodecContext *enc_ctx = ost->enc_ctx;
3284  AVCodecContext *dec_ctx = NULL;
3285 
3286  set_encoder_id(output_files[ost->file_index], ost);
3287 
3288  if (ist) {
3289  dec_ctx = ist->dec_ctx;
3290 
3291  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3292  }
3293 
3294 #if CONFIG_LIBMFX
3295  if (qsv_transcode_init(ost))
3296  exit_program(1);
3297 #endif
3298 
3299 #if CONFIG_CUVID
3300  if (cuvid_transcode_init(ost))
3301  exit_program(1);
3302 #endif
3303 
3304  if ((enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3305  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3306  filtergraph_is_simple(ost->filter->graph)) {
3307  FilterGraph *fg = ost->filter->graph;
3308  if (configure_filtergraph(fg)) {
3309  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
3310  exit_program(1);
3311  }
3312  }
3313 
3314  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3315  if (!ost->frame_rate.num)
3316  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
3317  if (ist && !ost->frame_rate.num)
3318  ost->frame_rate = ist->framerate;
3319  if (ist && !ost->frame_rate.num)
3320  ost->frame_rate = ist->st->r_frame_rate;
3321  if (ist && !ost->frame_rate.num) {
3322  ost->frame_rate = (AVRational){25, 1};
3323  av_log(NULL, AV_LOG_WARNING,
3324  "No information "
3325  "about the input framerate is available. Falling "
3326  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3327  "if you want a different framerate.\n",
3328  ost->file_index, ost->index);
3329  }
3330 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
3331  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
3332  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3333  ost->frame_rate = ost->enc->supported_framerates[idx];
3334  }
3335  // reduce frame rate for mpeg4 to be within the spec limits
3336  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3337  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3338  ost->frame_rate.num, ost->frame_rate.den, 65535);
3339  }
3340  }
3341 
3342  switch (enc_ctx->codec_type) {
3343  case AVMEDIA_TYPE_AUDIO:
3344  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
3345  if (dec_ctx)
3346  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3347  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3348  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
3349  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
3350  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
3351  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
3352  break;
3353  case AVMEDIA_TYPE_VIDEO:
3354  enc_ctx->time_base = av_inv_q(ost->frame_rate);
3355  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3356  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
3357  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3359  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3360  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3361  }
3362  for (j = 0; j < ost->forced_kf_count; j++)
3363  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3364  AV_TIME_BASE_Q,
3365  enc_ctx->time_base);
3366 
3367  enc_ctx->width = ost->filter->filter->inputs[0]->w;
3368  enc_ctx->height = ost->filter->filter->inputs[0]->h;
3369  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3370  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3371  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3372  ost->filter->filter->inputs[0]->sample_aspect_ratio;
3373  if (!strncmp(ost->enc->name, "libx264", 7) &&
3374  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3375  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3376  av_log(NULL, AV_LOG_WARNING,
3377  "No pixel format specified, %s for H.264 encoding chosen.\n"
3378  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3379  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3380  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
3381  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
3382  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
3383  av_log(NULL, AV_LOG_WARNING,
3384  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
3385  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
3386  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
3387  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
3388  if (dec_ctx)
3389  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3390  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3391 
3392  ost->st->avg_frame_rate = ost->frame_rate;
3393 
3394  if (!dec_ctx ||
3395  enc_ctx->width != dec_ctx->width ||
3396  enc_ctx->height != dec_ctx->height ||
3397  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3398  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
3399  }
3400 
3401  if (ost->forced_keyframes) {
3402  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3403  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
3404  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
3405  if (ret < 0) {
3406  av_log(NULL, AV_LOG_ERROR,
3407  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3408  return ret;
3409  }
3410  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
3411  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
3412  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
3413  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
3414 
3415  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3416  // parse it only for static kf timings
3417  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3418  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
3419  }
3420  }
3421  break;
3422  case AVMEDIA_TYPE_SUBTITLE:
3423  enc_ctx->time_base = (AVRational){1, 1000};
3424  if (!enc_ctx->width) {
3425  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3426  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3427  }
3428  break;
3429  case AVMEDIA_TYPE_DATA:
3430  break;
3431  default:
3432  abort();
3433  break;
3434  }
3435  }
3436 
3437  if (ost->disposition) {
3438  static const AVOption opts[] = {
3439  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3440  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3441  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3442  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3443  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3444  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3445  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3446  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3447  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3448  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3449  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3450  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3451  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3452  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3453  { NULL },
3454  };
3455  static const AVClass class = {
3456  .class_name = "",
3457  .item_name = av_default_item_name,
3458  .option = opts,
3459  .version = LIBAVUTIL_VERSION_INT,
3460  };
3461  const AVClass *pclass = &class;
3462 
3463  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3464  if (ret < 0)
3465  goto dump_format;
3466  }
3467  }
3468 
3469  /* init input streams */
3470  for (i = 0; i < nb_input_streams; i++)
3471  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3472  for (i = 0; i < nb_output_streams; i++) {
3473  ost = output_streams[i];
3474  avcodec_close(ost->enc_ctx);
3475  }
3476  goto dump_format;
3477  }
3478 
3479  /* open each encoder */
3480  for (i = 0; i < nb_output_streams; i++) {
3481  ret = init_output_stream(output_streams[i], error, sizeof(error));
3482  if (ret < 0)
3483  goto dump_format;
3484  }
3485 
3486  /* discard unused programs */
3487  for (i = 0; i < nb_input_files; i++) {
3488  InputFile *ifile = input_files[i];
3489  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3490  AVProgram *p = ifile->ctx->programs[j];
3491  int discard = AVDISCARD_ALL;
3492 
3493  for (k = 0; k < p->nb_stream_indexes; k++)
3494  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3495  discard = AVDISCARD_DEFAULT;
3496  break;
3497  }
3498  p->discard = discard;
3499  }
3500  }
3501 
3502  /* write headers for files with no streams */
3503  for (i = 0; i < nb_output_files; i++) {
3504  oc = output_files[i]->ctx;
3505  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3506  ret = check_init_output_file(output_files[i], i);
3507  if (ret < 0)
3508  goto dump_format;
3509  }
3510  }
3511 
3512  dump_format:
3513  /* dump the stream mapping */
3514  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3515  for (i = 0; i < nb_input_streams; i++) {
3516  ist = input_streams[i];
3517 
3518  for (j = 0; j < ist->nb_filters; j++) {
3519  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3520  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3521  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3522  ist->filters[j]->name);
3523  if (nb_filtergraphs > 1)
3524  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3525  av_log(NULL, AV_LOG_INFO, "\n");
3526  }
3527  }
3528  }
3529 
3530  for (i = 0; i < nb_output_streams; i++) {
3531  ost = output_streams[i];
3532 
3533  if (ost->attachment_filename) {
3534  /* an attached file */
3535  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3536  ost->attachment_filename, ost->file_index, ost->index);
3537  continue;
3538  }
3539 
3540  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3541  /* output from a complex graph */
3542  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3543  if (nb_filtergraphs > 1)
3544  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3545 
3546  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3547  ost->index, ost->enc ? ost->enc->name : "?");
3548  continue;
3549  }
3550 
3551  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3552  input_streams[ost->source_index]->file_index,
3553  input_streams[ost->source_index]->st->index,
3554  ost->file_index,
3555  ost->index);
3556  if (ost->sync_ist != input_streams[ost->source_index])
3557  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3558  ost->sync_ist->file_index,
3559  ost->sync_ist->st->index);
3560  if (ost->stream_copy)
3561  av_log(NULL, AV_LOG_INFO, " (copy)");
3562  else {
3563  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3564  const AVCodec *out_codec = ost->enc;
3565  const char *decoder_name = "?";
3566  const char *in_codec_name = "?";
3567  const char *encoder_name = "?";
3568  const char *out_codec_name = "?";
3569  const AVCodecDescriptor *desc;
3570 
3571  if (in_codec) {
3572  decoder_name = in_codec->name;
3573  desc = avcodec_descriptor_get(in_codec->id);
3574  if (desc)
3575  in_codec_name = desc->name;
3576  if (!strcmp(decoder_name, in_codec_name))
3577  decoder_name = "native";
3578  }
3579 
3580  if (out_codec) {
3581  encoder_name = out_codec->name;
3582  desc = avcodec_descriptor_get(out_codec->id);
3583  if (desc)
3584  out_codec_name = desc->name;
3585  if (!strcmp(encoder_name, out_codec_name))
3586  encoder_name = "native";
3587  }
3588 
3589  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3590  in_codec_name, decoder_name,
3591  out_codec_name, encoder_name);
3592  }
3593  av_log(NULL, AV_LOG_INFO, "\n");
3594  }
3595 
3596  if (ret) {
3597  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3598  return ret;
3599  }
3600 
3601  transcode_init_done = 1;
3602 
3603  return 0;
3604 }
3605 
3606 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3607 static int need_output(void)
3608 {
3609  int i;
3610 
3611  for (i = 0; i < nb_output_streams; i++) {
3612  OutputStream *ost = output_streams[i];
3613  OutputFile *of = output_files[ost->file_index];
3614  AVFormatContext *os = output_files[ost->file_index]->ctx;
3615 
3616  if (ost->finished ||
3617  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3618  continue;
3619  if (ost->frame_number >= ost->max_frames) {
3620  int j;
3621  for (j = 0; j < of->ctx->nb_streams; j++)
3622  close_output_stream(output_streams[of->ost_index + j]);
3623  continue;
3624  }
3625 
3626  return 1;
3627  }
3628 
3629  return 0;
3630 }
3631 
3632 /**
3633  * Select the output stream to process.
3634  *
3635  * @return selected output stream, or NULL if none available
3636  */
3637  static OutputStream *choose_output(void)
3638 {
3639  int i;
3640  int64_t opts_min = INT64_MAX;
3641  OutputStream *ost_min = NULL;
3642 
3643  for (i = 0; i < nb_output_streams; i++) {
3644  OutputStream *ost = output_streams[i];
3645  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3646  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3647  AV_TIME_BASE_Q);
3648  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3649  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3650 
3651  if (!ost->finished && opts < opts_min) {
3652  opts_min = opts;
3653  ost_min = ost->unavailable ? NULL : ost;
3654  }
3655  }
3656  return ost_min;
3657 }
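
choose_output() picks the least-advanced stream by rescaling each stream's current DTS into the common AV_TIME_BASE_Q time base before comparing. A minimal standalone sketch of that comparison with made-up values:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libavutil/avutil.h>       /* AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>  /* av_rescale_q */

int main(void)
{
    /* Two streams with different time bases and current DTS values. */
    AVRational tb_video = {1, 90000};   /* 90 kHz MPEG-TS style clock */
    AVRational tb_audio = {1, 48000};   /* audio sample clock */
    int64_t dts_video = 180000;         /* 2.0 s in the video time base */
    int64_t dts_audio = 72000;          /* 1.5 s in the audio time base */

    /* Rescale both into microseconds so they become comparable. */
    int64_t v_us = av_rescale_q(dts_video, tb_video, AV_TIME_BASE_Q);
    int64_t a_us = av_rescale_q(dts_audio, tb_audio, AV_TIME_BASE_Q);

    printf("video at %"PRId64" us, audio at %"PRId64" us -> feed the %s stream next\n",
           v_us, a_us, a_us < v_us ? "audio" : "video");
    return 0;
}
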
3658 
3659 static void set_tty_echo(int on)
3660 {
3661 #if HAVE_TERMIOS_H
3662  struct termios tty;
3663  if (tcgetattr(0, &tty) == 0) {
3664  if (on) tty.c_lflag |= ECHO;
3665  else tty.c_lflag &= ~ECHO;
3666  tcsetattr(0, TCSANOW, &tty);
3667  }
3668 #endif
3669 }
3670 
3671 static int check_keyboard_interaction(int64_t cur_time)
3672 {
3673  int i, ret, key;
3674  static int64_t last_time;
3675  if (received_nb_signals)
3676  return AVERROR_EXIT;
3677  /* read_key() returns 0 on EOF */
3678  if(cur_time - last_time >= 100000 && !run_as_daemon){
3679  key = read_key();
3680  last_time = cur_time;
3681  }else
3682  key = -1;
3683  if (key == 'q')
3684  return AVERROR_EXIT;
3685  if (key == '+') av_log_set_level(av_log_get_level()+10);
3686  if (key == '-') av_log_set_level(av_log_get_level()-10);
3687  if (key == 's') qp_hist ^= 1;
3688  if (key == 'h'){
3689  if (do_hex_dump){
3690  do_hex_dump = do_pkt_dump = 0;
3691  } else if(do_pkt_dump){
3692  do_hex_dump = 1;
3693  } else
3694  do_pkt_dump = 1;
3695  av_log_set_level(AV_LOG_DEBUG);
3696  }
3697  if (key == 'c' || key == 'C'){
3698  char buf[4096], target[64], command[256], arg[256] = {0};
3699  double time;
3700  int k, n = 0;
3701  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3702  i = 0;
3703  set_tty_echo(1);
3704  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3705  if (k > 0)
3706  buf[i++] = k;
3707  buf[i] = 0;
3708  set_tty_echo(0);
3709  fprintf(stderr, "\n");
3710  if (k > 0 &&
3711  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3712  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3713  target, time, command, arg);
3714  for (i = 0; i < nb_filtergraphs; i++) {
3715  FilterGraph *fg = filtergraphs[i];
3716  if (fg->graph) {
3717  if (time < 0) {
3718  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3719  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3720  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3721  } else if (key == 'c') {
3722  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3723  ret = AVERROR_PATCHWELCOME;
3724  } else {
3725  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3726  if (ret < 0)
3727  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3728  }
3729  }
3730  }
3731  } else {
3732  av_log(NULL, AV_LOG_ERROR,
3733  "Parse error, at least 3 arguments were expected, "
3734  "only %d given in string '%s'\n", n, buf);
3735  }
3736  }
3737  if (key == 'd' || key == 'D'){
3738  int debug=0;
3739  if(key == 'D') {
3740  debug = input_streams[0]->st->codec->debug<<1;
3741  if(!debug) debug = 1;
3742  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3743  debug += debug;
3744  }else{
3745  char buf[32];
3746  int k = 0;
3747  i = 0;
3748  set_tty_echo(1);
3749  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3750  if (k > 0)
3751  buf[i++] = k;
3752  buf[i] = 0;
3753  set_tty_echo(0);
3754  fprintf(stderr, "\n");
3755  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
3756  fprintf(stderr,"error parsing debug value\n");
3757  }
3758  for(i=0;i<nb_input_streams;i++) {
3759  input_streams[i]->st->codec->debug = debug;
3760  }
3761  for(i=0;i<nb_output_streams;i++) {
3762  OutputStream *ost = output_streams[i];
3763  ost->enc_ctx->debug = debug;
3764  }
3765  if(debug) av_log_set_level(AV_LOG_DEBUG);
3766  fprintf(stderr,"debug=%d\n", debug);
3767  }
3768  if (key == '?'){
3769  fprintf(stderr, "key function\n"
3770  "? show this help\n"
3771  "+ increase verbosity\n"
3772  "- decrease verbosity\n"
3773  "c Send command to first matching filter supporting it\n"
3774  "C Send/Queue command to all matching filters\n"
3775  "D cycle through available debug modes\n"
3776  "h dump packets/hex press to cycle through the 3 states\n"
3777  "q quit\n"
3778  "s Show QP histogram\n"
3779  );
3780  }
3781  return 0;
3782 }
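
The 'c'/'C' handler above forwards interactive commands to the filtergraphs with avfilter_graph_send_command() / avfilter_graph_queue_command(). A hedged sketch of the immediate case, assuming `graph` is an already configured AVFilterGraph that contains a filter accepting the command (for example the volume filter); the function name and values are illustrative only:

#include <stdio.h>
#include <libavfilter/avfilter.h>

/* Send "volume 0.5" to the first matching filter in the graph and print
 * the reply, mirroring what pressing 'c' does with user input. */
static int send_volume_command(AVFilterGraph *graph)
{
    char response[4096] = {0};
    int ret = avfilter_graph_send_command(graph, "volume", "volume", "0.5",
                                          response, sizeof(response),
                                          AVFILTER_CMD_FLAG_ONE);
    fprintf(stderr, "send_command returned %d, response: %s\n", ret, response);
    return ret;
}
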
3783 
3784 #if HAVE_PTHREADS
3785 static void *input_thread(void *arg)
3786 {
3787  InputFile *f = arg;
3788  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3789  int ret = 0;
3790 
3791  while (1) {
3792  AVPacket pkt;
3793  ret = av_read_frame(f->ctx, &pkt);
3794 
3795  if (ret == AVERROR(EAGAIN)) {
3796  av_usleep(10000);
3797  continue;
3798  }
3799  if (ret < 0) {
3800  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3801  break;
3802  }
3803  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3804  if (flags && ret == AVERROR(EAGAIN)) {
3805  flags = 0;
3806  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3807  av_log(f->ctx, AV_LOG_WARNING,
3808  "Thread message queue blocking; consider raising the "
3809  "thread_queue_size option (current value: %d)\n",
3810  f->thread_queue_size);
3811  }
3812  if (ret < 0) {
3813  if (ret != AVERROR_EOF)
3814  av_log(f->ctx, AV_LOG_ERROR,
3815  "Unable to send packet to main thread: %s\n",
3816  av_err2str(ret));
3817  av_packet_unref(&pkt);
3818  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3819  break;
3820  }
3821  }
3822 
3823  return NULL;
3824 }
3825 
3826 static void free_input_threads(void)
3827 {
3828  int i;
3829 
3830  for (i = 0; i < nb_input_files; i++) {
3831  InputFile *f = input_files[i];
3832  AVPacket pkt;
3833 
3834  if (!f || !f->in_thread_queue)
3835  continue;
3836  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3837  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3838  av_packet_unref(&pkt);
3839 
3840  pthread_join(f->thread, NULL);
3841  f->joined = 1;
3842  av_thread_message_queue_free(&f->in_thread_queue);
3843  }
3844 }
3845 
3846 static int init_input_threads(void)
3847 {
3848  int i, ret;
3849 
3850  if (nb_input_files == 1)
3851  return 0;
3852 
3853  for (i = 0; i < nb_input_files; i++) {
3854  InputFile *f = input_files[i];
3855 
3856  if (f->ctx->pb ? !f->ctx->pb->seekable :
3857  strcmp(f->ctx->iformat->name, "lavfi"))
3858  f->non_blocking = 1;
3859  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3860  f->thread_queue_size, sizeof(AVPacket));
3861  if (ret < 0)
3862  return ret;
3863 
3864  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3865  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3866  av_thread_message_queue_free(&f->in_thread_queue);
3867  return AVERROR(ret);
3868  }
3869  }
3870  return 0;
3871 }
3872 
3873 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3874 {
3875  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3876  f->non_blocking ?
3877  AV_THREAD_MESSAGE_NONBLOCK : 0);
3878 }
3879 #endif
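
input_thread()/init_input_threads() implement a producer/consumer hand-off built on libavutil's thread message queue. A minimal standalone sketch of the same pattern, using plain integers instead of AVPackets (queue size and names are illustrative only):

#include <stdio.h>
#include <pthread.h>
#include <libavutil/error.h>
#include <libavutil/threadmessage.h>

static void *producer(void *arg)
{
    AVThreadMessageQueue *q = arg;
    for (int i = 0; i < 5; i++)
        av_thread_message_queue_send(q, &i, 0);           /* blocks if the queue is full */
    av_thread_message_queue_set_err_recv(q, AVERROR_EOF); /* tell the consumer we are done */
    return NULL;
}

int main(void)
{
    AVThreadMessageQueue *q = NULL;
    pthread_t t;
    int msg, ret;

    if (av_thread_message_queue_alloc(&q, 8, sizeof(int)) < 0)
        return 1;
    pthread_create(&t, NULL, producer, q);

    while ((ret = av_thread_message_queue_recv(q, &msg, 0)) >= 0)
        printf("got %d\n", msg);
    printf("consumer finished with %d (AVERROR_EOF expected)\n", ret);

    pthread_join(t, NULL);
    av_thread_message_queue_free(&q);
    return 0;
}
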
3880 
3881 static int get_input_packet(InputFile *f, AVPacket *pkt)
3882 {
3883  if (f->rate_emu) {
3884  int i;
3885  for (i = 0; i < f->nb_streams; i++) {
3886  InputStream *ist = input_streams[f->ist_index + i];
3887  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3888  int64_t now = av_gettime_relative() - ist->start;
3889  if (pts > now)
3890  return AVERROR(EAGAIN);
3891  }
3892  }
3893 
3894 #if HAVE_PTHREADS
3895  if (nb_input_files > 1)
3896  return get_input_packet_mt(f, pkt);
3897 #endif
3898  return av_read_frame(f->ctx, pkt);
3899 }
3900 
3901 static int got_eagain(void)
3902 {
3903  int i;
3904  for (i = 0; i < nb_output_streams; i++)
3905  if (output_streams[i]->unavailable)
3906  return 1;
3907  return 0;
3908 }
3909 
3910 static void reset_eagain(void)
3911 {
3912  int i;
3913  for (i = 0; i < nb_input_files; i++)
3914  input_files[i]->eagain = 0;
3915  for (i = 0; i < nb_output_streams; i++)
3916  output_streams[i]->unavailable = 0;
3917 }
3918 
3919 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
3920 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
3921  AVRational time_base)
3922 {
3923  int ret;
3924 
3925  if (!*duration) {
3926  *duration = tmp;
3927  return tmp_time_base;
3928  }
3929 
3930  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
3931  if (ret < 0) {
3932  *duration = tmp;
3933  return tmp_time_base;
3934  }
3935 
3936  return time_base;
3937 }
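
duration_max() keeps the larger of two durations even though they are expressed in different time bases, by delegating the comparison to av_compare_ts(). A small worked example with hypothetical values:

#include <stdio.h>
#include <stdint.h>
#include <libavutil/mathematics.h>  /* av_compare_ts */

int main(void)
{
    /* 3600 ticks at 1/90000 = 40 ms; 1024 ticks at 1/48000 ~= 21.3 ms */
    AVRational tb_a = {1, 90000}, tb_b = {1, 48000};
    int64_t dur_a = 3600, dur_b = 1024;

    /* Negative: a < b; zero: equal; positive: a > b. Here it is positive. */
    int cmp = av_compare_ts(dur_a, tb_a, dur_b, tb_b);
    printf("av_compare_ts -> %d, keep duration %s\n", cmp, cmp >= 0 ? "a" : "b");
    return 0;
}
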
3938 
3939 static int seek_to_start(InputFile *ifile, AVFormatContext *is)
3940 {
3941  InputStream *ist;
3942  AVCodecContext *avctx;
3943  int i, ret, has_audio = 0;
3944  int64_t duration = 0;
3945 
3946  ret = av_seek_frame(is, -1, is->start_time, 0);
3947  if (ret < 0)
3948  return ret;
3949 
3950  for (i = 0; i < ifile->nb_streams; i++) {
3951  ist = input_streams[ifile->ist_index + i];
3952  avctx = ist->dec_ctx;
3953 
3954  // flush decoders
3955  if (ist->decoding_needed) {
3956  process_input_packet(ist, NULL, 1);
3957  avcodec_flush_buffers(avctx);
3958  }
3959 
3960  /* duration is the length of the last frame in a stream;
3961  * when an audio stream is present we don't care about the
3962  * last video frame's length because it is not defined exactly */
3963  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
3964  has_audio = 1;
3965  }
3966 
3967  for (i = 0; i < ifile->nb_streams; i++) {
3968  ist = input_streams[ifile->ist_index + i];
3969  avctx = ist->dec_ctx;
3970 
3971  if (has_audio) {
3972  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
3973  AVRational sample_rate = {1, avctx->sample_rate};
3974 
3975  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
3976  } else
3977  continue;
3978  } else {
3979  if (ist->framerate.num) {
3980  duration = av_rescale_q(1, ist->framerate, ist->st->time_base);
3981  } else if (ist->st->avg_frame_rate.num) {
3982  duration = av_rescale_q(1, ist->st->avg_frame_rate, ist->st->time_base);
3983  } else duration = 1;
3984  }
3985  if (!ifile->duration)
3986  ifile->time_base = ist->st->time_base;
3987  /* the total duration of the stream, max_pts - min_pts is
3988  * the duration of the stream without the last frame */
3989  duration += ist->max_pts - ist->min_pts;
3990  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
3991  ifile->time_base);
3992  }
3993 
3994  if (ifile->loop > 0)
3995  ifile->loop--;
3996 
3997  return ret;
3998 }
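
When -stream_loop rewinds a file, seek_to_start() derives the last audio frame's duration by treating nb_samples as a timestamp in a {1, sample_rate} time base and rescaling it into the stream time base. A standalone illustration with assumed numbers:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libavutil/mathematics.h>

int main(void)
{
    int64_t nb_samples = 1024;                 /* samples in the last decoded audio frame */
    AVRational sample_rate_tb = {1, 48000};    /* one tick per sample at 48 kHz */
    AVRational stream_tb      = {1, 90000};    /* MPEG-TS style stream time base */

    int64_t dur = av_rescale_q(nb_samples, sample_rate_tb, stream_tb);
    printf("last frame lasts %"PRId64" stream ticks (~%.1f ms)\n",
           dur, 1000.0 * dur * stream_tb.num / stream_tb.den);
    return 0;
}
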
3999 
4000 /*
4001  * Return
4002  * - 0 -- one packet was read and processed
4003  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4004  * this function should be called again
4005  * - AVERROR_EOF -- this function should not be called again
4006  */
4007 static int process_input(int file_index)
4008 {
4009  InputFile *ifile = input_files[file_index];
4010  AVFormatContext *is;
4011  InputStream *ist;
4012  AVPacket pkt;
4013  int ret, i, j;
4014  int64_t duration;
4015  int64_t pkt_dts;
4016 
4017  is = ifile->ctx;
4018  ret = get_input_packet(ifile, &pkt);
4019 
4020  if (ret == AVERROR(EAGAIN)) {
4021  ifile->eagain = 1;
4022  return ret;
4023  }
4024  if (ret < 0 && ifile->loop) {
4025  if ((ret = seek_to_start(ifile, is)) < 0)
4026  return ret;
4027  ret = get_input_packet(ifile, &pkt);
4028  if (ret == AVERROR(EAGAIN)) {
4029  ifile->eagain = 1;
4030  return ret;
4031  }
4032  }
4033  if (ret < 0) {
4034  if (ret != AVERROR_EOF) {
4035  print_error(is->filename, ret);
4036  if (exit_on_error)
4037  exit_program(1);
4038  }
4039 
4040  for (i = 0; i < ifile->nb_streams; i++) {
4041  ist = input_streams[ifile->ist_index + i];
4042  if (ist->decoding_needed) {
4043  ret = process_input_packet(ist, NULL, 0);
4044  if (ret>0)
4045  return 0;
4046  }
4047 
4048  /* mark all outputs that don't go through lavfi as finished */
4049  for (j = 0; j < nb_output_streams; j++) {
4050  OutputStream *ost = output_streams[j];
4051 
4052  if (ost->source_index == ifile->ist_index + i &&
4053  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4054  finish_output_stream(ost);
4055  }
4056  }
4057 
4058  ifile->eof_reached = 1;
4059  return AVERROR(EAGAIN);
4060  }
4061 
4062  reset_eagain();
4063 
4064  if (do_pkt_dump) {
4065  av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
4066  is->streams[pkt.stream_index]);
4067  }
4068  /* the following test is needed in case new streams appear
4069  dynamically in the stream: we ignore them */
4070  if (pkt.stream_index >= ifile->nb_streams) {
4071  report_new_stream(file_index, &pkt);
4072  goto discard_packet;
4073  }
4074 
4075  ist = input_streams[ifile->ist_index + pkt.stream_index];
4076 
4077  ist->data_size += pkt.size;
4078  ist->nb_packets++;
4079 
4080  if (ist->discard)
4081  goto discard_packet;
4082 
4083  if (exit_on_error && (pkt.flags & AV_PKT_FLAG_CORRUPT)) {
4084  av_log(NULL, AV_LOG_FATAL, "%s: corrupt input packet in stream %d\n", is->filename, pkt.stream_index);
4085  exit_program(1);
4086  }
4087 
4088  if (debug_ts) {
4089  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4090  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4094  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4095  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4096  av_ts2str(input_files[ist->file_index]->ts_offset),
4097  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4098  }
4099 
4100  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4101  int64_t stime, stime2;
4102  // Correcting starttime based on the enabled streams
4103  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4104  // so we instead do it here as part of discontinuity handling
4105  if ( ist->next_dts == AV_NOPTS_VALUE
4106  && ifile->ts_offset == -is->start_time
4107  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4108  int64_t new_start_time = INT64_MAX;
4109  for (i=0; i<is->nb_streams; i++) {
4110  AVStream *st = is->streams[i];
4111  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4112  continue;
4113  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4114  }
4115  if (new_start_time > is->start_time) {
4116  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4117  ifile->ts_offset = -new_start_time;
4118  }
4119  }
4120 
4121  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4122  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4123  ist->wrap_correction_done = 1;
4124 
4125  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4126  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4127  ist->wrap_correction_done = 0;
4128  }
4129  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4130  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4131  ist->wrap_correction_done = 0;
4132  }
4133  }
4134 
4135  /* add the stream-global side data to the first packet */
4136  if (ist->nb_packets == 1) {
4137  if (ist->st->nb_side_data)
4139  for (i = 0; i < ist->st->nb_side_data; i++) {
4140  AVPacketSideData *src_sd = &ist->st->side_data[i];
4141  uint8_t *dst_data;
4142 
4143  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4144  continue;
4145  if (ist->autorotate && src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4146  continue;
4147 
4148  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4149  if (!dst_data)
4150  exit_program(1);
4151 
4152  memcpy(dst_data, src_sd->data, src_sd->size);
4153  }
4154  }
4155 
4156  if (pkt.dts != AV_NOPTS_VALUE)
4157  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4158  if (pkt.pts != AV_NOPTS_VALUE)
4159  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4160 
4161  if (pkt.pts != AV_NOPTS_VALUE)
4162  pkt.pts *= ist->ts_scale;
4163  if (pkt.dts != AV_NOPTS_VALUE)
4164  pkt.dts *= ist->ts_scale;
4165 
4166  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4167  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4168  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4169  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4170  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4171  int64_t delta = pkt_dts - ifile->last_ts;
4172  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4173  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4174  ifile->ts_offset -= delta;
4175  av_log(NULL, AV_LOG_WARNING,
4176  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4177  delta, ifile->ts_offset);
4178  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4179  if (pkt.pts != AV_NOPTS_VALUE)
4180  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4181  }
4182  }
4183 
4184  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4185  if (pkt.pts != AV_NOPTS_VALUE) {
4186  pkt.pts += duration;
4187  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4188  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4189  }
4190 
4191  if (pkt.dts != AV_NOPTS_VALUE)
4192  pkt.dts += duration;
4193 
4194  pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4195  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4196  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4197  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4198  !copy_ts) {
4199  int64_t delta = pkt_dts - ist->next_dts;
4200  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4201  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4202  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4203  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4204  ifile->ts_offset -= delta;
4205  av_log(NULL, AV_LOG_WARNING,
4206  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4207  delta, ifile->ts_offset);
4208  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4209  if (pkt.pts != AV_NOPTS_VALUE)
4210  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4211  }
4212  } else {
4213  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4214  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4215  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4216  pkt.dts = AV_NOPTS_VALUE;
4217  }
4218  if (pkt.pts != AV_NOPTS_VALUE){
4219  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4220  delta = pkt_pts - ist->next_dts;
4221  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4222  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4223  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4224  pkt.pts = AV_NOPTS_VALUE;
4225  }
4226  }
4227  }
4228  }
4229 
4230  if (pkt.dts != AV_NOPTS_VALUE)
4231  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4232 
4233  if (debug_ts) {
4234  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4236  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4237  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4238  av_ts2str(input_files[ist->file_index]->ts_offset),
4239  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4240  }
4241 
4242  sub2video_heartbeat(ist, pkt.pts);
4243 
4244  process_input_packet(ist, &pkt, 0);
4245 
4246 discard_packet:
4247  av_packet_unref(&pkt);
4248 
4249  return 0;
4250 }
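
process_input() applies the per-file ts_offset, which is kept in AV_TIME_BASE (microsecond) units, to each packet after converting it into the packet's own stream time base. A minimal sketch of that conversion with made-up numbers:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libavutil/avutil.h>        /* AV_TIME_BASE_Q */
#include <libavutil/mathematics.h>   /* av_rescale_q */

int main(void)
{
    AVRational stream_tb = {1, 90000};
    int64_t pkt_dts   = 450000;      /* 5.0 s in the stream time base */
    int64_t ts_offset = -1000000;    /* shift everything back by 1 s (microseconds) */

    pkt_dts += av_rescale_q(ts_offset, AV_TIME_BASE_Q, stream_tb);
    printf("adjusted dts = %"PRId64" (expect 360000, i.e. 4.0 s)\n", pkt_dts);
    return 0;
}
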
4251 
4252 /**
4253  * Perform a step of transcoding for the specified filter graph.
4254  *
4255  * @param[in] graph filter graph to consider
4256  * @param[out] best_ist input stream where a frame would allow to continue
4257  * @return 0 for success, <0 for error
4258  */
4259 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4260 {
4261  int i, ret;
4262  int nb_requests, nb_requests_max = 0;
4263  InputFilter *ifilter;
4264  InputStream *ist;
4265 
4266  *best_ist = NULL;
4267  ret = avfilter_graph_request_oldest(graph->graph);
4268  if (ret >= 0)
4269  return reap_filters(0);
4270 
4271  if (ret == AVERROR_EOF) {
4272  ret = reap_filters(1);
4273  for (i = 0; i < graph->nb_outputs; i++)
4274  close_output_stream(graph->outputs[i]->ost);
4275  return ret;
4276  }
4277  if (ret != AVERROR(EAGAIN))
4278  return ret;
4279 
4280  for (i = 0; i < graph->nb_inputs; i++) {
4281  ifilter = graph->inputs[i];
4282  ist = ifilter->ist;
4283  if (input_files[ist->file_index]->eagain ||
4284  input_files[ist->file_index]->eof_reached)
4285  continue;
4286  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4287  if (nb_requests > nb_requests_max) {
4288  nb_requests_max = nb_requests;
4289  *best_ist = ist;
4290  }
4291  }
4292 
4293  if (!*best_ist)
4294  for (i = 0; i < graph->nb_outputs; i++)
4295  graph->outputs[i]->ost->unavailable = 1;
4296 
4297  return 0;
4298 }
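
transcode_from_filter() decides which input to read next by asking each buffer source how many frame requests it failed to satisfy. A hedged sketch of that selection, assuming `srcs` is an array of already configured buffer/abuffer source filter contexts (a hypothetical setup, not ffmpeg.c's own structures):

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>

/* Return the index of the buffer source that failed the most requests,
 * or -1 if none of them is starved. */
static int most_starved_input(AVFilterContext **srcs, int nb_srcs)
{
    int best = -1;
    unsigned best_requests = 0;

    for (int i = 0; i < nb_srcs; i++) {
        unsigned nb_requests = av_buffersrc_get_nb_failed_requests(srcs[i]);
        if (nb_requests > best_requests) {
            best_requests = nb_requests;
            best = i;
        }
    }
    return best;
}
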
4299 
4300 /**
4301  * Run a single step of transcoding.
4302  *
4303  * @return 0 for success, <0 for error
4304  */
4305 static int transcode_step(void)
4306 {
4307  OutputStream *ost;
4308  InputStream *ist;
4309  int ret;
4310 
4311  ost = choose_output();
4312  if (!ost) {
4313  if (got_eagain()) {
4314  reset_eagain();
4315  av_usleep(10000);
4316  return 0;
4317  }
4318  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4319  return AVERROR_EOF;
4320  }
4321 
4322  if (ost->filter) {
4323  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4324  return ret;
4325  if (!ist)
4326  return 0;
4327  } else {
4328  av_assert0(ost->source_index >= 0);
4329  ist = input_streams[ost->source_index];
4330  }
4331 
4332  ret = process_input(ist->file_index);
4333  if (ret == AVERROR(EAGAIN)) {
4334  if (input_files[ist->file_index]->eagain)
4335  ost->unavailable = 1;
4336  return 0;
4337  }
4338 
4339  if (ret < 0)
4340  return ret == AVERROR_EOF ? 0 : ret;
4341 
4342  return reap_filters(0);
4343 }
4344 
4345 /*
4346  * The following code is the main loop of the file converter
4347  */
4348 static int transcode(void)
4349 {
4350  int ret, i;
4351  AVFormatContext *os;
4352  OutputStream *ost;
4353  InputStream *ist;
4354  int64_t timer_start;
4355  int64_t total_packets_written = 0;
4356 
4357  ret = transcode_init();
4358  if (ret < 0)
4359  goto fail;
4360 
4361  if (stdin_interaction) {
4362  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4363  }
4364 
4365  timer_start = av_gettime_relative();
4366 
4367 #if HAVE_PTHREADS
4368  if ((ret = init_input_threads()) < 0)
4369  goto fail;
4370 #endif
4371 
4372  while (!received_sigterm) {
4373  int64_t cur_time= av_gettime_relative();
4374 
4375  /* if 'q' was pressed, exit */
4376  if (stdin_interaction)
4377  if (check_keyboard_interaction(cur_time) < 0)
4378  break;
4379 
4380  /* check if there's any stream where output is still needed */
4381  if (!need_output()) {
4382  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4383  break;
4384  }
4385 
4386  ret = transcode_step();
4387  if (ret < 0 && ret != AVERROR_EOF) {
4388  char errbuf[128];
4389  av_strerror(ret, errbuf, sizeof(errbuf));
4390 
4391  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", errbuf);
4392  break;
4393  }
4394 
4395  /* dump report using the first output video and audio streams */
4396  print_report(0, timer_start, cur_time);
4397  }
4398 #if HAVE_PTHREADS
4399  free_input_threads();
4400 #endif
4401 
4402  /* at the end of stream, we must flush the decoder buffers */
4403  for (i = 0; i < nb_input_streams; i++) {
4404  ist = input_streams[i];
4405  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
4406  process_input_packet(ist, NULL, 0);
4407  }
4408  }
4409  flush_encoders();
4410 
4411  term_exit();
4412 
4413  /* write the trailer if needed and close file */
4414  for (i = 0; i < nb_output_files; i++) {
4415  os = output_files[i]->ctx;
4416  if (!output_files[i]->header_written) {
4417  av_log(NULL, AV_LOG_WARNING,
4418  "Nothing was written into output file %d (%s), because "
4419  "at least one of its streams received no packets.\n",
4420  i, os->filename);
4421  continue;
4422  }
4423  if ((ret = av_write_trailer(os)) < 0) {
4424  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s", os->filename, av_err2str(ret));
4425  if (exit_on_error)
4426  exit_program(1);
4427  }
4428  }
4429 
4430  /* dump report by using the first video and audio streams */
4431  print_report(1, timer_start, av_gettime_relative());
4432 
4433  /* close each encoder */
4434  for (i = 0; i < nb_output_streams; i++) {
4435  ost = output_streams[i];
4436  if (ost->encoding_needed) {
4437  av_freep(&ost->enc_ctx->stats_in);
4438  }
4439  total_packets_written += ost->packets_written;
4440  }
4441 
4442  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4443  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4444  exit_program(1);
4445  }
4446 
4447  /* close each decoder */
4448  for (i = 0; i < nb_input_streams; i++) {
4449  ist = input_streams[i];
4450  if (ist->decoding_needed) {
4451  avcodec_close(ist->dec_ctx);
4452  if (ist->hwaccel_uninit)
4453  ist->hwaccel_uninit(ist->dec_ctx);
4454  }
4455  }
4456 
4458 
4459  /* finished ! */
4460  ret = 0;
4461 
4462  fail:
4463 #if HAVE_PTHREADS
4464  free_input_threads();
4465 #endif
4466 
4467  if (output_streams) {
4468  for (i = 0; i < nb_output_streams; i++) {
4469  ost = output_streams[i];
4470  if (ost) {
4471  if (ost->logfile) {
4472  if (fclose(ost->logfile))
4473  av_log(NULL, AV_LOG_ERROR,
4474  "Error closing logfile, loss of information possible: %s\n",
4475  av_err2str(AVERROR(errno)));
4476  ost->logfile = NULL;
4477  }
4478  av_freep(&ost->forced_kf_pts);
4479  av_freep(&ost->apad);
4480  av_freep(&ost->disposition);
4481  av_dict_free(&ost->encoder_opts);
4482  av_dict_free(&ost->sws_dict);
4483  av_dict_free(&ost->swr_opts);
4484  av_dict_free(&ost->resample_opts);
4485  }
4486  }
4487  }
4488  return ret;
4489 }
4490 
4491 
4492 static int64_t getutime(void)
4493 {
4494 #if HAVE_GETRUSAGE
4495  struct rusage rusage;
4496 
4497  getrusage(RUSAGE_SELF, &rusage);
4498  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4499 #elif HAVE_GETPROCESSTIMES
4500  HANDLE proc;
4501  FILETIME c, e, k, u;
4502  proc = GetCurrentProcess();
4503  GetProcessTimes(proc, &c, &e, &k, &u);
4504  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4505 #else
4506  return av_gettime_relative();
4507 #endif
4508 }
4509 
4510 static int64_t getmaxrss(void)
4511 {
4512 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4513  struct rusage rusage;
4514  getrusage(RUSAGE_SELF, &rusage);
4515  return (int64_t)rusage.ru_maxrss * 1024;
4516 #elif HAVE_GETPROCESSMEMORYINFO
4517  HANDLE proc;
4518  PROCESS_MEMORY_COUNTERS memcounters;
4519  proc = GetCurrentProcess();
4520  memcounters.cb = sizeof(memcounters);
4521  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4522  return memcounters.PeakPagefileUsage;
4523 #else
4524  return 0;
4525 #endif
4526 }
4527 
4528 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
4529 {
4530 }
4531 
4532 int main(int argc, char **argv)
4533 {
4534  int i, ret;
4535  int64_t ti;
4536 
4537  init_dynload();
4538 
4539  register_exit(ffmpeg_cleanup);
4540 
4541  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4542 
4543  av_log_set_flags(AV_LOG_SKIP_REPEATED);
4544  parse_loglevel(argc, argv, options);
4545 
4546  if(argc>1 && !strcmp(argv[1], "-d")){
4547  run_as_daemon=1;
4548  av_log_set_callback(log_callback_null);
4549  argc--;
4550  argv++;
4551  }
4552 
4553  avcodec_register_all();
4554 #if CONFIG_AVDEVICE
4555  avdevice_register_all();
4556 #endif
4557  avfilter_register_all();
4558  av_register_all();
4559  avformat_network_init();
4560 
4561  show_banner(argc, argv, options);
4562 
4563  /* parse options and open all input/output files */
4564  ret = ffmpeg_parse_options(argc, argv);
4565  if (ret < 0)
4566  exit_program(1);
4567 
4568  if (nb_output_files <= 0 && nb_input_files == 0) {
4569  show_usage();
4570  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4571  exit_program(1);
4572  }
4573 
4574  /* file converter / grab */
4575  if (nb_output_files <= 0) {
4576  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4577  exit_program(1);
4578  }
4579 
4580 // if (nb_input_files == 0) {
4581 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4582 // exit_program(1);
4583 // }
4584 
4585  for (i = 0; i < nb_output_files; i++) {
4586  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4587  want_sdp = 0;
4588  }
4589 
4590  current_time = ti = getutime();
4591  if (transcode() < 0)
4592  exit_program(1);
4593  ti = getutime() - ti;
4594  if (do_benchmark) {
4595  av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
4596  }
4597  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4598  decode_error_stat[0], decode_error_stat[1]);
4599  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4600  exit_program(69);
4601 
4603  return main_return_code;
4604 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1543
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:281
int nb_bitstream_filters
Definition: ffmpeg.h:424
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:895
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:113
uint32_t BOOL
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2939
enum AVChromaLocation chroma_location
Definition: avcodec.h:4070
int got_output
Definition: ffmpeg.h:309
#define AV_DISPOSITION_METADATA
Definition: avformat.h:873
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:36
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1882
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1060
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1995
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:413
const struct AVCodec * codec
Definition: avcodec.h:1685
Definition: ffmpeg.h:390
AVRational framerate
Definition: avcodec.h:3375
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:4061
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:919
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:336
const char * s
Definition: avisynth_c.h:768
Bytestream IO Context.
Definition: avio.h:147
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:520
void term_init(void)
Definition: ffmpeg.c:368
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:309
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5762
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:240
int nb_outputs
Definition: ffmpeg.h:257
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
AVDictionary * swr_opts
Definition: ffmpeg.h:470
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:267
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2266
int resample_channels
Definition: ffmpeg.h:304
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:184
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:2837
void term_exit(void)
Definition: ffmpeg.c:310
int stream_copy
Definition: ffmpeg.h:475
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1225
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3922
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1592
AVOption.
Definition: opt.h:245
AVRational frame_rate
Definition: ffmpeg.h:440
int64_t * forced_kf_pts
Definition: ffmpeg.h:449
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:290
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:3015
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:343
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:465
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:874
#define CODEC_FLAG_PASS2
Definition: avcodec.h:1090
static int process_input(int file_index)
Definition: ffmpeg.c:4007
int exit_on_error
Definition: ffmpeg_opt.c:115
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:2990
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1741
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1621
uint8_t * bsf_extradata_updated
Definition: ffmpeg.h:425
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:127
Memory buffer source API.
const char * desc
Definition: nvenc.c:101
void av_log_set_level(int level)
Set the log level.
Definition: log.c:391
AVRational framerate
Definition: ffmpeg.h:293
Immediately push the frame to the output.
Definition: buffersrc.h:46
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: utils.c:2991
AVCodecParserContext * parser
Definition: ffmpeg.h:489
static int64_t cur_time
Definition: ffserver.c:262
int64_t max_pts
Definition: ffmpeg.h:287
int decoding_needed
Definition: ffmpeg.h:265
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3980
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5740
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:956
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5731
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1590
#define vsnprintf
Definition: snprintf.h:36
int index
stream index in AVFormatContext
Definition: avformat.h:890
int size
Definition: avcodec.h:1602
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4510
int max_muxing_queue_size
Definition: ffmpeg.h:504
const char * b
Definition: vf_curves.c:113
static int nb_frames_dup
Definition: ffmpeg.c:128
int av_log2(unsigned v)
Definition: intmath.c:26
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2746
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:607
#define AV_DISPOSITION_DUB
Definition: avformat.h:837
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:2087
int eagain
Definition: ffmpeg.h:361
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1145
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1904
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:614
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:849
int quality
Definition: ffmpeg.h:502
unsigned num_rects
Definition: avcodec.h:3960
AVFrame * filter_frame
Definition: ffmpeg.h:272
static int transcode_init(void)
Definition: ffmpeg.c:3228
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2753
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2418
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2853
int do_benchmark_all
Definition: ffmpeg_opt.c:108
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:851
int last_dropped
Definition: ffmpeg.h:434
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
uint64_t_TMPL AV_RL64
Definition: bytestream.h:87
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:658
discard all
Definition: avcodec.h:787
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:996
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:339
int64_t input_ts_offset
Definition: ffmpeg.h:367
int do_hex_dump
Definition: ffmpeg_opt.c:109
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:3077
int nb_input_streams
Definition: ffmpeg.c:140
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:61
const char * name
Definition: ffmpeg.h:73
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2643
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3172
Picture data structure.
Definition: avcodec.h:3889
uint64_t packets_written
Definition: ffmpeg.h:496
AVCodec.
Definition: avcodec.h:3600
#define VSYNC_VFR
Definition: ffmpeg.h:54
int nb_dts_buffer
Definition: ffmpeg.h:355
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:496
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3972
int print_stats
Definition: ffmpeg_opt.c:117
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:135
float dts_error_threshold
Definition: ffmpeg_opt.c:100
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:521
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
uint64_t data_size
Definition: ffmpeg.h:494
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:454
AVBSFContext ** bsf_ctx
Definition: ffmpeg.h:426
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:841
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1813
Undefined.
Definition: avutil.h:267
AVSubtitleRect ** rects
Definition: avcodec.h:3961
int encoding_needed
Definition: ffmpeg.h:412
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:619
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4528
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3671
Format I/O context.
Definition: avformat.h:1338
uint64_t samples_decoded
Definition: ffmpeg.h:352
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:231
unsigned int nb_stream_indexes
Definition: avformat.h:1270
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:75
int64_t cur_dts
Definition: avformat.h:1061
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3924
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:984
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:315
uint64_t frames_decoded
Definition: ffmpeg.h:351
int header_written
Definition: ffmpeg.h:526
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:251
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:460
static uint8_t * subtitle_out
Definition: ffmpeg.c:137
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:199
static int main_return_code
Definition: ffmpeg.c:320
static int64_t start_time
Definition: ffplay.c:326
int copy_initial_nonkeyframes
Definition: ffmpeg.h:483
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:124
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2801
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2446
int64_t * dts_buffer
Definition: ffmpeg.h:354
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:543
uint8_t
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof)
Definition: ffmpeg.c:2177
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
Opaque data information usually continuous.
Definition: avutil.h:197
AVDictionary * sws_dict
Definition: ffmpeg.h:469
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: avcodec.h:4046
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:205
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:214
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3312
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:678
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5336
int stdin_interaction
Definition: ffmpeg_opt.c:119
FILE * logfile
Definition: ffmpeg.h:461
AVDictionary * opts
Definition: ffmpeg.h:518
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:570
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1619
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1425
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:185
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2629
static int need_output(void)
Definition: ffmpeg.c:3607
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:383
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:1000
static double psnr(double d)
Definition: ffmpeg.c:1316
int do_benchmark
Definition: ffmpeg_opt.c:107
int audio_sync_method
Definition: ffmpeg_opt.c:103
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, uint8_t clip)
Definition: cfhd.c:80
int shortest
Definition: ffmpeg.h:524
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1406
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:4223
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:4492
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:112
const char * name
Definition: avcodec.h:5778
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:344
int nb_streams
Definition: ffmpeg.h:374
uint8_t * data
Definition: avcodec.h:1601
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
static void set_tty_echo(int on)
Definition: ffmpeg.c:3659
AVDictionary * resample_opts
Definition: ffmpeg.h:471
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:622
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3109
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:237
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:3939
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4671
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:142
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:403
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1378
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:319
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:827
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1321
int resample_sample_rate
Definition: ffmpeg.h:303
uint8_t * data
Definition: avcodec.h:1545
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:322
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:500
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:306
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3925
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:511
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:270
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1268
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2845
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:204
int top_field_first
Definition: ffmpeg.h:294
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1466
int nb_output_streams
Definition: ffmpeg.c:145
int file_index
Definition: ffmpeg.h:261
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:208
const OptionDef options[]
Definition: ffserver.c:3969
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2420
unsigned int * stream_index
Definition: avformat.h:1269
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:300
int resample_height
Definition: ffmpeg.h:298
int wrap_correction_done
Definition: ffmpeg.h:282
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:284
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:261
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:871
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1357
int64_t next_dts
Definition: ffmpeg.h:277
int64_t bit_rate
The average bitrate of the encoded data (in bits per second).
Definition: avcodec.h:4009
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1633
attribute_deprecated int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Definition: avpicture.c:37
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:511
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:50
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:3023
static volatile int transcode_init_done
Definition: ffmpeg.c:318
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:784
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5398
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3614
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3661
int rate_emu
Definition: ffmpeg.h:377
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:4148
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1998
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1359
static void reset_eagain(void)
Definition: ffmpeg.c:3910
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:341
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1631
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:630
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3148
FilterGraph ** filtergraphs
Definition: ffmpeg.c:149
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:459
AVFilterContext * filter
Definition: ffmpeg.h:230
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:338
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:488
int64_t start
Definition: ffmpeg.h:274
int loop
Definition: ffmpeg.h:363
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3923
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:349
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:422
int video_sync_method
Definition: ffmpeg_opt.c:104
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:130
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:178
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
char * sdp_filename
Definition: ffmpeg_opt.c:96
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:435
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2347
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int video_delay
Video only.
Definition: avcodec.h:4075
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: utils.c:2880
const char * r
Definition: vf_curves.c:111
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: avcodec.h:3619
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:130
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
unsigned int nb_programs
Definition: avformat.h:1493
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:517
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3976
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1771
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:539
int av_frame_get_decode_error_flags(const AVFrame *frame)
int cuvid_transcode_init(OutputStream *ost)
Definition: ffmpeg_cuvid.c:60
AVChapter ** chapters
Definition: avformat.h:1544
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5768
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1547
int av_log_get_level(void)
Get the current log level.
Definition: log.c:386
const char * name
Name of the codec implementation.
Definition: avcodec.h:3607
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:846
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:74
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:596
int force_fps
Definition: ffmpeg.h:442
int qsv_transcode_init(OutputStream *ost)
Definition: ffmpeg_qsv.c:183
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:967
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1265
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:4137
int qp_hist
Definition: ffmpeg_opt.c:118
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:83
float frame_drop_threshold
Definition: ffmpeg_opt.c:105
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1038
int64_t error[4]
Definition: ffmpeg.h:513
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1607
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2977
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2489
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
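An illustrative use of av_compare_ts(): checking whether a timestamp in a stream's time base has reached a limit expressed in AV_TIME_BASE units (the kind of test behind -t handling). The helper name and parameters are assumptions, not the exact code from check_recording_time().

#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"

/* Return 1 once ts (in stream_tb units) has reached limit_us (in AV_TIME_BASE
 * units), 0 otherwise. */
static int reached_limit(int64_t ts, AVRational stream_tb, int64_t limit_us)
{
    return av_compare_ts(ts, stream_tb, limit_us, AV_TIME_BASE_Q) >= 0;
}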
int extradata_size
Size of the extradata content in bytes.
Definition: avcodec.h:3998
uint32_t end_display_time
Definition: avcodec.h:3959
static int want_sdp
Definition: ffmpeg.c:132
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3962
OutputFilter * filter
Definition: ffmpeg.h:463
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2015
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:353
AVRational frame_aspect_ratio
Definition: ffmpeg.h:446
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:840
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1566
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:848
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1394
AVDictionary * opts
Definition: movenc.c:50
AVCodecContext * parser_avctx
Definition: ffmpeg.h:490
static int nb_frames_drop
Definition: ffmpeg.c:129
A bitmap, pict will be set.
Definition: avcodec.h:3904
int linesize[4]
Definition: avcodec.h:3940
int nb_output_files
Definition: ffmpeg.c:147
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:243
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:224
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:441
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
static int transcode(void)
Definition: ffmpeg.c:4348
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:886
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:413
char filename[1024]
input or output filename
Definition: avformat.h:1414
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:248
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:527
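A minimal sketch of the muxing call order around avformat_write_header(): header first, then interleaved packets, then the trailer. It assumes oc is an AVFormatContext whose streams and I/O context are already set up; the function is illustrative only.

#include "libavformat/avformat.h"

/* Write the container header, one packet and the trailer. */
static int mux_one_packet_and_finish(AVFormatContext *oc, AVPacket *pkt)
{
    int ret = avformat_write_header(oc, NULL);
    if (ret < 0)
        return ret;
    ret = av_interleaved_write_frame(oc, pkt);  /* takes ownership of the packet reference */
    if (ret < 0)
        return ret;
    return av_write_trailer(oc);
}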
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3518
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:190
int saw_first_ts
Definition: ffmpeg.h:291
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
int abort_on_flags
Definition: ffmpeg_opt.c:116
This side data contains quality related information from the encoder.
Definition: avcodec.h:1449
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2058
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:238
int width
picture width / height.
Definition: avcodec.h:1863
PVOID HANDLE
char * apad
Definition: ffmpeg.h:472
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3542
int64_t nb_samples
Definition: ffmpeg.h:288
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5774
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:454
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:176
int64_t duration
Definition: ffmpeg.h:364
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:401
const char * name
Definition: avformat.h:524
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:240
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:865
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:849
int nb_filtergraphs
Definition: ffmpeg.c:150
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:88
int64_t last_ts
Definition: ffmpeg.h:370
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:298
int do_pkt_dump
Definition: ffmpeg_opt.c:110
int64_t max_frames
Definition: ffmpeg.h:431
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:340
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:302
int audio_channels_mapped
Definition: ffmpeg.h:458
int n
Definition: avisynth_c.h:684
AVDictionary * metadata
Definition: avformat.h:958
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1822
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:602
Usually treated as AVMEDIA_TYPE_DATA.
Definition: avutil.h:194
Opaque data information usually sparse.
Definition: avutil.h:199
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define src
Definition: vp9dsp.c:530
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3939
static int got_eagain(void)
Definition: ffmpeg.c:3901
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:196
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:227
Keep a reference to the frame.
Definition: buffersrc.h:53
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:3146
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:129
AVCodecContext * enc
Definition: muxing.c:55
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:415
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:850
int ret
Definition: ffmpeg.h:310
int audio_volume
Definition: ffmpeg_opt.c:102
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:889
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3435
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:486
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
InputFilter ** filters
Definition: ffmpeg.h:325
int fix_sub_duration
Definition: ffmpeg.h:307
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: utils.c:2831
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:373
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4684
Definition: ffmpeg.h:72
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2458
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:74
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2759
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:836
AVStream * st
Definition: ffmpeg.h:262
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:168
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3187
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int frame_size
Definition: mxfenc.c:1820
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:51
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:872
int ost_index
Definition: ffmpeg.h:519
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:723
enum AVMediaType codec_type
Definition: avcodec.h:1684
double ts_scale
Definition: ffmpeg.h:290
int unavailable
Definition: ffmpeg.h:474
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2693
enum AVCodecID codec_id
Definition: avcodec.h:1693
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:318
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:254
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1561
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:183
float max_error_rate
Definition: ffmpeg_opt.c:121
int sample_rate
samples per second
Definition: avcodec.h:2438
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
uint64_t frames_encoded
Definition: ffmpeg.h:498
AVIOContext * pb
I/O context.
Definition: avformat.h:1380
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:507
int ist_index
Definition: ffmpeg.h:362
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:549
static int loop
Definition: ffplay.c:335
int debug
debug
Definition: avcodec.h:2916
static void print_sdp(void)
Definition: ffmpeg.c:2584
const char * graph_desc
Definition: ffmpeg.h:249
int guess_layout_max
Definition: ffmpeg.h:295
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
int64_t start_time
Definition: ffmpeg.h:371
struct InputStream::@38 prev_sub
main external API structure.
Definition: avcodec.h:1676
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:567
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:318
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:461
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:767
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2748
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:263
const char * attachment_filename
Definition: ffmpeg.h:482
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1865
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: utils.c:2961
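The encoding counterpart of the decode loop above, a minimal sketch assuming enc_ctx is an opened encoder context and pkt a caller-allocated AVPacket; the helper name is hypothetical.

#include <errno.h>
#include "libavcodec/avcodec.h"

/* Push one frame (or NULL to flush) into the encoder and collect every
 * packet it produces. */
static int encode_frame(AVCodecContext *enc_ctx, const AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;

        /* ... hand pkt to the muxer here ... */
        av_packet_unref(pkt);
    }
    return 0;
}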
AVRational time_base
Definition: ffmpeg.h:366
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:605
AVCodecContext * enc_ctx
Definition: ffmpeg.h:428
void * buf
Definition: avisynth_c.h:690
AVFrame * decoded_frame
Definition: ffmpeg.h:271
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1792
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
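A minimal sketch of passing options to a codec through an AVDictionary, in the spirit of ffmpeg.c's assert_avoptions() check: entries the codec does not consume remain in the dictionary. The "threads" option and the helper name are illustrative choices.

#include "libavcodec/avcodec.h"
#include "libavutil/dict.h"

/* Open a decoder with per-stream options; report leftover entries as
 * unrecognized options. */
static int open_decoder_with_opts(AVCodecContext *dec_ctx, const AVCodec *codec)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "threads", "auto", 0);
    ret = avcodec_open2(dec_ctx, codec, &opts);

    if (ret >= 0 && av_dict_count(opts) > 0)
        ret = AVERROR_OPTION_NOT_FOUND;  /* options left unconsumed */

    av_dict_free(&opts);
    return ret;
}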
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4259
int nb_coded_side_data
Definition: avcodec.h:3519
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:645
int * audio_channels_map
Definition: ffmpeg.h:457
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:348
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1954
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:144
int index
Definition: gxfenc.c:89
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2928
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:408
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:398
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:4166
static int current_time
Definition: ffmpeg.c:134
int64_t sync_opts
Definition: ffmpeg.h:417
char * vstats_filename
Definition: ffmpeg_opt.c:95
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:158
AVCodecContext * dec_ctx
Definition: ffmpeg.h:269
char * disposition
Definition: ffmpeg.h:485
int filtergraph_is_simple(FilterGraph *fg)
#define mid_pred
Definition: mathops.h:96
AVMediaType
Definition: avutil.h:193
discard useless packets like 0 size packets in avi
Definition: avcodec.h:782
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1057
int nb_streams_warn
Definition: ffmpeg.h:376
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2593
AVDictionary * decoder_opts
Definition: ffmpeg.h:292
int autorotate
Definition: ffmpeg.h:297
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:668
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1241
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:120
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4129
#define u(width,...)
int64_t ts_offset
Definition: ffmpeg.h:369
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:266
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4305
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:466
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3737
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1676
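A minimal demuxing-loop sketch around av_read_frame(), assuming ic is an AVFormatContext opened with avformat_open_input(); the helper name and stream_index filtering are illustrative.

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

/* Read packets until EOF, keeping only those of the selected stream. */
static int read_stream_packets(AVFormatContext *ic, int stream_index)
{
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    while ((ret = av_read_frame(ic, &pkt)) >= 0) {
        if (pkt.stream_index == stream_index) {
            /* ... hand pkt to a decoder or muxer here ... */
        }
        av_packet_unref(&pkt);  /* each av_read_frame() returns a new reference */
    }
    return ret == AVERROR_EOF ? 0 : ret;
}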
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:3920
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:271
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:660
AVFrame * filtered_frame
Definition: ffmpeg.h:432
int source_index
Definition: ffmpeg.h:410
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:275
static volatile int received_nb_signals
Definition: ffmpeg.c:317
int copy_prior_start
Definition: ffmpeg.h:484
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:493
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1757
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:636
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:326
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2638
int bits_per_raw_sample
This is the number of valid bits in each output sample.
Definition: avcodec.h:4035
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1423
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
uint8_t level
Definition: svq3.c:207
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:453
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
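A small illustrative helper built on av_get_bytes_per_sample(): the per-plane byte size of an audio frame follows directly from the sample format and the sample count.

#include "libavutil/samplefmt.h"

/* Bytes occupied by one channel plane of nb_samples samples. */
static int plane_size(enum AVSampleFormat fmt, int nb_samples)
{
    int bps = av_get_bytes_per_sample(fmt);  /* 0 if the format is unknown */
    return bps * nb_samples;
}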
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:278
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2412
int av_strerror(int errnum, char *errbuf, size_t errbuf_size)
Put a description of the AVERROR code errnum in errbuf.
Definition: error.c:105
int resample_sample_fmt
Definition: ffmpeg.h:302
int forced_kf_count
Definition: ffmpeg.h:450
int64_t start
Definition: avformat.h:1298
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:473
char * forced_keyframes
Definition: ffmpeg.h:452
uint64_t data_size
Definition: ffmpeg.h:347
int resample_width
Definition: ffmpeg.h:299
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:280
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1046
struct FilterGraph * graph
Definition: ffmpeg.h:239
uint64_t limit_filesize
Definition: ffmpeg.h:522
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1473
AVIOContext * progress_avio
Definition: ffmpeg.c:135
if(ret< 0)
Definition: vf_mcdeint.c:282
int main(int argc, char **argv)
Definition: ffmpeg.c:4532
int reinit_filters
Definition: ffmpeg.h:328
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:478
AVCodecParameters * ref_par
Definition: ffmpeg.h:429
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:263
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1030
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:936
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:491
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:947
uint32_t start_display_time
Definition: avcodec.h:3958
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:92
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1050
uint64_t samples_encoded
Definition: ffmpeg.h:499
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1297
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:3136
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:945
char * key
Definition: dict.h:86
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
Denominator.
Definition: rational.h:60
uint8_t pi
Sample-format conversion table (CONV_FUNC/fmt_pair_to_conv_functions) and the swri_audio_convert() helpers from libswresample, shown as an expanded macro block.
Definition: audioconvert.c:56
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:83
AVFrame * last_frame
Definition: ffmpeg.h:433
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
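A minimal sketch of driving a configured filter graph: push a frame into the buffer source, then pull whatever the buffer sink has ready. src and sink are assumed to be the graph's endpoint filter contexts; the function name is hypothetical.

#include <errno.h>
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"

/* Filter one input frame and consume all currently available output. */
static int filter_frame(AVFilterContext *src, AVFilterContext *sink,
                        AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame_flags(src, in, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;

    while ((ret = av_buffersink_get_frame_flags(sink, out, 0)) >= 0) {
        /* ... encode or display out here ... */
        av_frame_unref(out);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}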
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1634
int copy_ts
Definition: ffmpeg_opt.c:111
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1002
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1350
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4165
AVFormatContext * ctx
Definition: ffmpeg.h:359
int pict_type
Definition: ffmpeg.h:510
AVBufferRef * hw_device_ctx
Definition: ffmpeg_opt.c:93
AVSubtitle subtitle
Definition: ffmpeg.h:311
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:734
int eof_reached
Definition: ffmpeg.h:360
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
int forced_kf_index
Definition: ffmpeg.h:451
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:489
char * avfilter
Definition: ffmpeg.h:464
uint8_t * name
Definition: ffmpeg.h:233
char * value
Definition: dict.h:87
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:327
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:99
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:713
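A simplified sketch of evaluating a -force_key_frames "expr:" expression with av_expr_parse()/av_expr_eval(), in the spirit of the forced_keyframes_pexpr handling in ffmpeg.c. Only the "n" and "t" constants are wired up here; the real code exposes the full forced_keyframes_const_names set, and the helper name is hypothetical.

#include "libavutil/eval.h"

static const char *const kf_const_names[] = { "n", "t", NULL };

/* Return 1 if the expression evaluates to nonzero for frame n at time t,
 * 0 if it evaluates to zero, or a negative error code. */
static int keyframe_forced(const char *expr_str, double n, double t)
{
    AVExpr *expr = NULL;
    double vals[] = { n, t };
    int ret = av_expr_parse(&expr, expr_str, kf_const_names,
                            NULL, NULL, NULL, NULL, 0, NULL);
    if (ret < 0)
        return ret;
    ret = av_expr_eval(expr, vals, NULL) != 0;
    av_expr_free(expr);
    return ret;
}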
int channels
number of audio channels
Definition: avcodec.h:2439
int top_field_first
Definition: ffmpeg.h:443
static uint8_t tmp[8]
Definition: des.c:38
OutputFilter ** outputs
Definition: ffmpeg.h:256
InputFile ** input_files
Definition: ffmpeg.c:141
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2683
void av_log_set_flags(int arg)
Definition: log.c:396
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:253
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:221
AVFormatContext * ctx
Definition: ffmpeg.h:517
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:839
int bits_per_coded_sample
The number of bits per sample in the codedwords.
Definition: avcodec.h:4022
uint8_t * extradata
Extra binary data needed for initializing the decoder, codec-dependent.
Definition: avcodec.h:3994
void show_usage(void)
Definition: ffmpeg_opt.c:3098
An instance of a filter.
Definition: avfilter.h:307
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:859
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:332
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1600
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:468
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1287
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4999
int height
Definition: frame.h:236
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:254
#define av_freep(p)
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:342
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:664
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2035
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:338
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2406
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2938
OutputFile ** output_files
Definition: ffmpeg.c:146
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Definition: avformat.h:1241
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
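A one-function sketch of av_rescale_q_rnd() with the rounding flags that commonly accompany timestamp conversion: round to nearest while letting AV_NOPTS_VALUE pass through unchanged. The wrapper name is illustrative.

#include "libavutil/mathematics.h"

/* Convert a pts between two time bases, preserving AV_NOPTS_VALUE. */
static int64_t rescale_pts(int64_t pts, AVRational from_tb, AVRational to_tb)
{
    return av_rescale_q_rnd(pts, from_tb, to_tb,
                            AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}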
static void flush_encoders(void)
Definition: ffmpeg.c:1782
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:3984
int copy_tb
Definition: ffmpeg_opt.c:113
int64_t min_pts
Definition: ffmpeg.h:286
int initialized
Definition: ffmpeg.h:480
static volatile int received_sigterm
Definition: ffmpeg.c:316
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:263
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3881
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2182
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:317
int stream_index
Definition: avcodec.h:1603
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:926
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:331
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:3942
int64_t first_pts
Definition: ffmpeg.h:420
int nb_inputs
Definition: ffmpeg.h:255
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:949
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
#define DECODING_FOR_OST
Definition: ffmpeg.h:266
int index
Definition: ffmpeg.h:409
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1103
AV_RL32
Read a 32-bit value in little-endian byte order (bytestream read/write macro).
Definition: bytestream.h:87
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
uint64_t resample_channel_layout
Definition: ffmpeg.h:305
OSTFinished
Definition: ffmpeg.h:402
This structure stores compressed data.
Definition: avcodec.h:1578
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:44
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1092
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5757
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2039
int debug_ts
Definition: ffmpeg_opt.c:114
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3637
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
static void sigterm_handler(int sig)
Definition: ffmpeg.c:323
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1594
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:122
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1733
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:242
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1494
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:838
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:139
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:68
Definition: ffmpeg.h:394
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:773
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3311