FFmpeg
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
utils.c
Go to the documentation of this file.
1 /*
2  * various utility functions for use within FFmpeg
3  * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include <stdarg.h>
23 #include <stdint.h>
24 
25 #include "config.h"
26 
27 #include "libavutil/avassert.h"
28 #include "libavutil/avstring.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/mathematics.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/parseutils.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/time.h"
37 #include "libavutil/timestamp.h"
38 
39 #include "libavcodec/bytestream.h"
40 #include "libavcodec/internal.h"
41 #include "libavcodec/raw.h"
42 
43 #include "audiointerleave.h"
44 #include "avformat.h"
45 #include "avio_internal.h"
46 #include "id3v2.h"
47 #include "internal.h"
48 #include "metadata.h"
49 #if CONFIG_NETWORK
50 #include "network.h"
51 #endif
52 #include "riff.h"
53 #include "url.h"
54 
55 #include "libavutil/ffversion.h"
56 const char av_format_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
57 
58 /**
59  * @file
60  * various utility functions for use within FFmpeg
61  */
62 
63 unsigned avformat_version(void)
64 {
67 }
68 
69 const char *avformat_configuration(void)
70 {
71  return FFMPEG_CONFIGURATION;
72 }
73 
74 const char *avformat_license(void)
75 {
76 #define LICENSE_PREFIX "libavformat license: "
77  return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
78 }
79 
80 #define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
81 
82 static int is_relative(int64_t ts) {
83  return ts > (RELATIVE_TS_BASE - (1LL<<48));
84 }
85 
86 /**
87  * Wrap a given time stamp, if there is an indication for an overflow
88  *
89  * @param st stream
90  * @param timestamp the time stamp to wrap
91  * @return resulting time stamp
92  */
93 static int64_t wrap_timestamp(const AVStream *st, int64_t timestamp)
94 {
96  st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
98  timestamp < st->pts_wrap_reference)
99  return timestamp + (1ULL << st->pts_wrap_bits);
100  else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
101  timestamp >= st->pts_wrap_reference)
102  return timestamp - (1ULL << st->pts_wrap_bits);
103  }
104  return timestamp;
105 }
106 
107 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
108 MAKE_ACCESSORS(AVStream, stream, char *, recommended_encoder_configuration)
110 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
111 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
112 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, data_codec)
113 MAKE_ACCESSORS(AVFormatContext, format, int, metadata_header_padding)
114 MAKE_ACCESSORS(AVFormatContext, format, void *, opaque)
115 MAKE_ACCESSORS(AVFormatContext, format, av_format_control_message, control_message_cb)
116 #if FF_API_OLD_OPEN_CALLBACKS
118 MAKE_ACCESSORS(AVFormatContext, format, AVOpenCallback, open_cb)
120 #endif
121 
122 int64_t av_stream_get_end_pts(const AVStream *st)
123 {
124  if (st->priv_pts) {
125  return st->priv_pts->val;
126  } else
127  return AV_NOPTS_VALUE;
128 }
129 
130 struct AVCodecParserContext *av_stream_get_parser(const AVStream *st)
131 {
132  return st->parser;
133 }
134 
135 void av_format_inject_global_side_data(AVFormatContext *s)
136 {
137  int i;
139  for (i = 0; i < s->nb_streams; i++) {
140  AVStream *st = s->streams[i];
141  st->inject_global_side_data = 1;
142  }
143 }
144 
145 int ff_copy_whiteblacklists(AVFormatContext *dst, const AVFormatContext *src)
146 {
147  av_assert0(!dst->codec_whitelist &&
148  !dst->format_whitelist &&
149  !dst->protocol_whitelist &&
150  !dst->protocol_blacklist);
151  dst-> codec_whitelist = av_strdup(src->codec_whitelist);
155  if ( (src-> codec_whitelist && !dst-> codec_whitelist)
156  || (src-> format_whitelist && !dst-> format_whitelist)
157  || (src->protocol_whitelist && !dst->protocol_whitelist)
158  || (src->protocol_blacklist && !dst->protocol_blacklist)) {
159  av_log(dst, AV_LOG_ERROR, "Failed to duplicate black/whitelist\n");
160  return AVERROR(ENOMEM);
161  }
162  return 0;
163 }
164 
165 static const AVCodec *find_decoder(AVFormatContext *s, const AVStream *st, enum AVCodecID codec_id)
166 {
167 #if FF_API_LAVF_AVCTX
169  if (st->codec->codec)
170  return st->codec->codec;
172 #endif
173 
174  switch (st->codecpar->codec_type) {
175  case AVMEDIA_TYPE_VIDEO:
176  if (s->video_codec) return s->video_codec;
177  break;
178  case AVMEDIA_TYPE_AUDIO:
179  if (s->audio_codec) return s->audio_codec;
180  break;
182  if (s->subtitle_codec) return s->subtitle_codec;
183  break;
184  }
185 
186  return avcodec_find_decoder(codec_id);
187 }
188 
189 static const AVCodec *find_probe_decoder(AVFormatContext *s, const AVStream *st, enum AVCodecID codec_id)
190 {
191  const AVCodec *codec;
192 
193 #if CONFIG_H264_DECODER
194  /* Other parts of the code assume this decoder to be used for h264,
195  * so force it if possible. */
196  if (codec_id == AV_CODEC_ID_H264)
197  return avcodec_find_decoder_by_name("h264");
198 #endif
199 
200  codec = find_decoder(s, st, codec_id);
201  if (!codec)
202  return NULL;
203 
205  const AVCodec *probe_codec = NULL;
206  while (probe_codec = av_codec_next(probe_codec)) {
207  if (probe_codec->id == codec_id &&
208  av_codec_is_decoder(probe_codec) &&
210  return probe_codec;
211  }
212  }
213  }
214 
215  return codec;
216 }
217 
218 int av_format_get_probe_score(const AVFormatContext *s)
219 {
220  return s->probe_score;
221 }
222 
223 /* an arbitrarily chosen "sane" max packet size -- 50M */
224 #define SANE_CHUNK_SIZE (50000000)
225 
227 {
228  if (s->maxsize>= 0) {
229  int64_t remaining= s->maxsize - avio_tell(s);
230  if (remaining < size) {
231  int64_t newsize = avio_size(s);
232  if (!s->maxsize || s->maxsize<newsize)
233  s->maxsize = newsize - !newsize;
234  remaining= s->maxsize - avio_tell(s);
235  remaining= FFMAX(remaining, 0);
236  }
237 
238  if (s->maxsize>= 0 && remaining+1 < size) {
239  av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
240  size = remaining+1;
241  }
242  }
243  return size;
244 }
245 
246 /* Read the data in sane-sized chunks and append to pkt.
247  * Return the number of bytes read or an error. */
249 {
250  int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
251  int orig_size = pkt->size;
252  int ret;
253 
254  do {
255  int prev_size = pkt->size;
256  int read_size;
257 
258  /* When the caller requests a lot of data, limit it to the amount
259  * left in file or SANE_CHUNK_SIZE when it is not known. */
260  read_size = size;
261  if (read_size > SANE_CHUNK_SIZE/10) {
262  read_size = ffio_limit(s, read_size);
263  // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
264  if (s->maxsize < 0)
265  read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
266  }
267 
268  ret = av_grow_packet(pkt, read_size);
269  if (ret < 0)
270  break;
271 
272  ret = avio_read(s, pkt->data + prev_size, read_size);
273  if (ret != read_size) {
274  av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
275  break;
276  }
277 
278  size -= read_size;
279  } while (size > 0);
280  if (size > 0)
281  pkt->flags |= AV_PKT_FLAG_CORRUPT;
282 
283  pkt->pos = orig_pos;
284  if (!pkt->size)
285  av_packet_unref(pkt);
286  return pkt->size > orig_size ? pkt->size - orig_size : ret;
287 }
288 
290 {
291  av_init_packet(pkt);
292  pkt->data = NULL;
293  pkt->size = 0;
294  pkt->pos = avio_tell(s);
295 
296  return append_packet_chunked(s, pkt, size);
297 }
298 
300 {
301  if (!pkt->size)
302  return av_get_packet(s, pkt, size);
303  return append_packet_chunked(s, pkt, size);
304 }
305 
306 int av_filename_number_test(const char *filename)
307 {
308  char buf[1024];
309  return filename &&
310  (av_get_frame_filename(buf, sizeof(buf), filename, 1) >= 0);
311 }
312 
313 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st,
314  AVProbeData *pd)
315 {
316  static const struct {
317  const char *name;
318  enum AVCodecID id;
319  enum AVMediaType type;
320  } fmt_id_type[] = {
332  { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
333  { "truehd", AV_CODEC_ID_TRUEHD, AVMEDIA_TYPE_AUDIO },
334  { 0 }
335  };
336  int score;
337  AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
338 
339  if (fmt) {
340  int i;
341  av_log(s, AV_LOG_DEBUG,
342  "Probe with size=%d, packets=%d detected %s with score=%d\n",
344  fmt->name, score);
345  for (i = 0; fmt_id_type[i].name; i++) {
346  if (!strcmp(fmt->name, fmt_id_type[i].name)) {
347  if (fmt_id_type[i].type != AVMEDIA_TYPE_AUDIO &&
348  st->codecpar->sample_rate)
349  continue;
350  if (st->request_probe > score &&
351  st->codecpar->codec_id != fmt_id_type[i].id)
352  continue;
353  st->codecpar->codec_id = fmt_id_type[i].id;
354  st->codecpar->codec_type = fmt_id_type[i].type;
355  st->internal->need_context_update = 1;
356 #if FF_API_LAVF_AVCTX
358  st->codec->codec_type = st->codecpar->codec_type;
359  st->codec->codec_id = st->codecpar->codec_id;
361 #endif
362  return score;
363  }
364  }
365  }
366  return 0;
367 }
368 
369 /************************************************************/
370 /* input media file */
371 
372 int av_demuxer_open(AVFormatContext *ic) {
373  int err;
374 
375  if (ic->format_whitelist && av_match_list(ic->iformat->name, ic->format_whitelist, ',') <= 0) {
376  av_log(ic, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", ic->format_whitelist);
377  return AVERROR(EINVAL);
378  }
379 
380  if (ic->iformat->read_header) {
381  err = ic->iformat->read_header(ic);
382  if (err < 0)
383  return err;
384  }
385 
386  if (ic->pb && !ic->internal->data_offset)
387  ic->internal->data_offset = avio_tell(ic->pb);
388 
389  return 0;
390 }
391 
392 /* Open input file and probe the format if necessary. */
393 static int init_input(AVFormatContext *s, const char *filename,
395 {
396  int ret;
397  AVProbeData pd = { filename, NULL, 0 };
398  int score = AVPROBE_SCORE_RETRY;
399 
400  if (s->pb) {
402  if (!s->iformat)
403  return av_probe_input_buffer2(s->pb, &s->iformat, filename,
404  s, 0, s->format_probesize);
405  else if (s->iformat->flags & AVFMT_NOFILE)
406  av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
407  "will be ignored with AVFMT_NOFILE format.\n");
408  return 0;
409  }
410 
411  if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
412  (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
413  return score;
414 
415  if ((ret = s->io_open(s, &s->pb, filename, AVIO_FLAG_READ | s->avio_flags, options)) < 0)
416  return ret;
417 
418  if (s->iformat)
419  return 0;
420  return av_probe_input_buffer2(s->pb, &s->iformat, filename,
421  s, 0, s->format_probesize);
422 }
423 
424 static int add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
425  AVPacketList **plast_pktl, int ref)
426 {
427  AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
428  int ret;
429 
430  if (!pktl)
431  return AVERROR(ENOMEM);
432 
433  if (ref) {
434  if ((ret = av_packet_ref(&pktl->pkt, pkt)) < 0) {
435  av_free(pktl);
436  return ret;
437  }
438  } else {
439  pktl->pkt = *pkt;
440  }
441 
442  if (*packet_buffer)
443  (*plast_pktl)->next = pktl;
444  else
445  *packet_buffer = pktl;
446 
447  /* Add the packet in the buffered packet list. */
448  *plast_pktl = pktl;
449  return 0;
450 }
451 
452 int avformat_queue_attached_pictures(AVFormatContext *s)
453 {
454  int i, ret;
455  for (i = 0; i < s->nb_streams; i++)
457  s->streams[i]->discard < AVDISCARD_ALL) {
458  if (s->streams[i]->attached_pic.size <= 0) {
460  "Attached picture on stream %d has invalid size, "
461  "ignoring\n", i);
462  continue;
463  }
464 
466  &s->streams[i]->attached_pic,
468  if (ret < 0)
469  return ret;
470  }
471  return 0;
472 }
473 
474 static int update_stream_avctx(AVFormatContext *s)
475 {
476  int i, ret;
477  for (i = 0; i < s->nb_streams; i++) {
478  AVStream *st = s->streams[i];
479 
480  if (!st->internal->need_context_update)
481  continue;
482 
483  /* update internal codec context, for the parser */
485  if (ret < 0)
486  return ret;
487 
488 #if FF_API_LAVF_AVCTX
490  /* update deprecated public codec context */
491  ret = avcodec_parameters_to_context(st->codec, st->codecpar);
492  if (ret < 0)
493  return ret;
495 #endif
496 
497  st->internal->need_context_update = 0;
498  }
499  return 0;
500 }
501 
502 
503 int avformat_open_input(AVFormatContext **ps, const char *filename,
505 {
506  AVFormatContext *s = *ps;
507  int i, ret = 0;
508  AVDictionary *tmp = NULL;
509  ID3v2ExtraMeta *id3v2_extra_meta = NULL;
510 
511  if (!s && !(s = avformat_alloc_context()))
512  return AVERROR(ENOMEM);
513  if (!s->av_class) {
514  av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
515  return AVERROR(EINVAL);
516  }
517  if (fmt)
518  s->iformat = fmt;
519 
520  if (options)
521  av_dict_copy(&tmp, *options, 0);
522 
523  if (s->pb) // must be before any goto fail
525 
526  if ((ret = av_opt_set_dict(s, &tmp)) < 0)
527  goto fail;
528 
529  if ((ret = init_input(s, filename, &tmp)) < 0)
530  goto fail;
531  s->probe_score = ret;
532 
533  if (!s->protocol_whitelist && s->pb && s->pb->protocol_whitelist) {
535  if (!s->protocol_whitelist) {
536  ret = AVERROR(ENOMEM);
537  goto fail;
538  }
539  }
540 
541  if (!s->protocol_blacklist && s->pb && s->pb->protocol_blacklist) {
543  if (!s->protocol_blacklist) {
544  ret = AVERROR(ENOMEM);
545  goto fail;
546  }
547  }
548 
549  if (s->format_whitelist && av_match_list(s->iformat->name, s->format_whitelist, ',') <= 0) {
550  av_log(s, AV_LOG_ERROR, "Format not on whitelist \'%s\'\n", s->format_whitelist);
551  ret = AVERROR(EINVAL);
552  goto fail;
553  }
554 
556 
557  /* Check filename in case an image number is expected. */
558  if (s->iformat->flags & AVFMT_NEEDNUMBER) {
559  if (!av_filename_number_test(filename)) {
560  ret = AVERROR(EINVAL);
561  goto fail;
562  }
563  }
564 
566  av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
567 
568  /* Allocate private data. */
569  if (s->iformat->priv_data_size > 0) {
570  if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
571  ret = AVERROR(ENOMEM);
572  goto fail;
573  }
574  if (s->iformat->priv_class) {
575  *(const AVClass **) s->priv_data = s->iformat->priv_class;
577  if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
578  goto fail;
579  }
580  }
581 
582  /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
583  if (s->pb)
584  ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta, 0);
585 
586  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
587  if ((ret = s->iformat->read_header(s)) < 0)
588  goto fail;
589 
590  if (id3v2_extra_meta) {
591  if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
592  !strcmp(s->iformat->name, "tta")) {
593  if ((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
594  goto fail;
595  } else
596  av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
597  }
598  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
599 
600  if ((ret = avformat_queue_attached_pictures(s)) < 0)
601  goto fail;
602 
603  if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->internal->data_offset)
604  s->internal->data_offset = avio_tell(s->pb);
605 
607 
609 
610  for (i = 0; i < s->nb_streams; i++)
612 
613  if (options) {
614  av_dict_free(options);
615  *options = tmp;
616  }
617  *ps = s;
618  return 0;
619 
620 fail:
621  ff_id3v2_free_extra_meta(&id3v2_extra_meta);
622  av_dict_free(&tmp);
623  if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
624  avio_closep(&s->pb);
626  *ps = NULL;
627  return ret;
628 }
629 
630 /*******************************************************/
631 
632 static void force_codec_ids(AVFormatContext *s, AVStream *st)
633 {
634  switch (st->codecpar->codec_type) {
635  case AVMEDIA_TYPE_VIDEO:
636  if (s->video_codec_id)
637  st->codecpar->codec_id = s->video_codec_id;
638  break;
639  case AVMEDIA_TYPE_AUDIO:
640  if (s->audio_codec_id)
641  st->codecpar->codec_id = s->audio_codec_id;
642  break;
644  if (s->subtitle_codec_id)
646  break;
647  case AVMEDIA_TYPE_DATA:
648  if (s->data_codec_id)
649  st->codecpar->codec_id = s->data_codec_id;
650  break;
651  }
652 }
653 
654 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
655 {
656  if (st->request_probe>0) {
657  AVProbeData *pd = &st->probe_data;
658  int end;
659  av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
660  --st->probe_packets;
661 
662  if (pkt) {
663  uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
664  if (!new_buf) {
666  "Failed to reallocate probe buffer for stream %d\n",
667  st->index);
668  goto no_packet;
669  }
670  pd->buf = new_buf;
671  memcpy(pd->buf + pd->buf_size, pkt->data, pkt->size);
672  pd->buf_size += pkt->size;
673  memset(pd->buf + pd->buf_size, 0, AVPROBE_PADDING_SIZE);
674  } else {
675 no_packet:
676  st->probe_packets = 0;
677  if (!pd->buf_size) {
679  "nothing to probe for stream %d\n", st->index);
680  }
681  }
682 
684  || st->probe_packets<= 0;
685 
686  if (end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)) {
687  int score = set_codec_from_probe_data(s, st, pd);
689  || end) {
690  pd->buf_size = 0;
691  av_freep(&pd->buf);
692  st->request_probe = -1;
693  if (st->codecpar->codec_id != AV_CODEC_ID_NONE) {
694  av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
695  } else
696  av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
697  }
698  force_codec_ids(s, st);
699  }
700  }
701  return 0;
702 }
703 
704 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index, AVPacket *pkt)
705 {
706  int64_t ref = pkt->dts;
707  int i, pts_wrap_behavior;
708  int64_t pts_wrap_reference;
709  AVProgram *first_program;
710 
711  if (ref == AV_NOPTS_VALUE)
712  ref = pkt->pts;
713  if (st->pts_wrap_reference != AV_NOPTS_VALUE || st->pts_wrap_bits >= 63 || ref == AV_NOPTS_VALUE || !s->correct_ts_overflow)
714  return 0;
715  ref &= (1LL << st->pts_wrap_bits)-1;
716 
717  // reference time stamp should be 60 s before first time stamp
718  pts_wrap_reference = ref - av_rescale(60, st->time_base.den, st->time_base.num);
719  // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
720  pts_wrap_behavior = (ref < (1LL << st->pts_wrap_bits) - (1LL << st->pts_wrap_bits-3)) ||
721  (ref < (1LL << st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
723 
724  first_program = av_find_program_from_stream(s, NULL, stream_index);
725 
726  if (!first_program) {
727  int default_stream_index = av_find_default_stream_index(s);
728  if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
729  for (i = 0; i < s->nb_streams; i++) {
731  continue;
732  s->streams[i]->pts_wrap_reference = pts_wrap_reference;
733  s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
734  }
735  }
736  else {
737  st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
738  st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
739  }
740  }
741  else {
742  AVProgram *program = first_program;
743  while (program) {
744  if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
745  pts_wrap_reference = program->pts_wrap_reference;
746  pts_wrap_behavior = program->pts_wrap_behavior;
747  break;
748  }
749  program = av_find_program_from_stream(s, program, stream_index);
750  }
751 
752  // update every program with differing pts_wrap_reference
753  program = first_program;
754  while (program) {
755  if (program->pts_wrap_reference != pts_wrap_reference) {
756  for (i = 0; i<program->nb_stream_indexes; i++) {
757  s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
758  s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
759  }
760 
761  program->pts_wrap_reference = pts_wrap_reference;
762  program->pts_wrap_behavior = pts_wrap_behavior;
763  }
764  program = av_find_program_from_stream(s, program, stream_index);
765  }
766  }
767  return 1;
768 }
769 
770 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
771 {
772  int ret, i, err;
773  AVStream *st;
774 
775  for (;;) {
777 
778  if (pktl) {
779  *pkt = pktl->pkt;
780  st = s->streams[pkt->stream_index];
782  if ((err = probe_codec(s, st, NULL)) < 0)
783  return err;
784  if (st->request_probe <= 0) {
785  s->internal->raw_packet_buffer = pktl->next;
787  av_free(pktl);
788  return 0;
789  }
790  }
791 
792  pkt->data = NULL;
793  pkt->size = 0;
794  av_init_packet(pkt);
795  ret = s->iformat->read_packet(s, pkt);
796  if (ret < 0) {
797  /* Some demuxers return FFERROR_REDO when they consume
798  data and discard it (ignored streams, junk, extradata).
799  We must re-call the demuxer to get the real packet. */
800  if (ret == FFERROR_REDO)
801  continue;
802  if (!pktl || ret == AVERROR(EAGAIN))
803  return ret;
804  for (i = 0; i < s->nb_streams; i++) {
805  st = s->streams[i];
806  if (st->probe_packets || st->request_probe > 0)
807  if ((err = probe_codec(s, st, NULL)) < 0)
808  return err;
809  av_assert0(st->request_probe <= 0);
810  }
811  continue;
812  }
813 
814  if (!pkt->buf) {
815  AVPacket tmp = { 0 };
816  ret = av_packet_ref(&tmp, pkt);
817  if (ret < 0)
818  return ret;
819  *pkt = tmp;
820  }
821 
822  if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
823  (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
825  "Dropped corrupted packet (stream = %d)\n",
826  pkt->stream_index);
827  av_packet_unref(pkt);
828  continue;
829  }
830 
831  if (pkt->stream_index >= (unsigned)s->nb_streams) {
832  av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
833  continue;
834  }
835 
836  st = s->streams[pkt->stream_index];
837 
839  // correct first time stamps to negative values
840  if (!is_relative(st->first_dts))
841  st->first_dts = wrap_timestamp(st, st->first_dts);
842  if (!is_relative(st->start_time))
843  st->start_time = wrap_timestamp(st, st->start_time);
844  if (!is_relative(st->cur_dts))
845  st->cur_dts = wrap_timestamp(st, st->cur_dts);
846  }
847 
848  pkt->dts = wrap_timestamp(st, pkt->dts);
849  pkt->pts = wrap_timestamp(st, pkt->pts);
850 
851  force_codec_ids(s, st);
852 
853  /* TODO: audio: time filter; video: frame reordering (pts != dts) */
855  pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
856 
857  if (!pktl && st->request_probe <= 0)
858  return ret;
859 
860  err = add_to_pktbuf(&s->internal->raw_packet_buffer, pkt,
862  if (err)
863  return err;
865 
866  if ((err = probe_codec(s, st, pkt)) < 0)
867  return err;
868  }
869 }
870 
871 
872 /**********************************************************/
873 
875 {
876  if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
877  avctx->codec_id == AV_CODEC_ID_MP1 ||
878  avctx->codec_id == AV_CODEC_ID_MP2 ||
879  avctx->codec_id == AV_CODEC_ID_MP3/* ||
880  avctx->codec_id == AV_CODEC_ID_CELT*/)
881  return 1;
882  return 0;
883 }
884 
885 /**
886  * Return the frame duration in seconds. Return 0 if not available.
887  */
888 void ff_compute_frame_duration(AVFormatContext *s, int *pnum, int *pden, AVStream *st,
890 {
891  AVRational codec_framerate = s->iformat ? st->internal->avctx->framerate :
893  int frame_size, sample_rate;
894 
895 #if FF_API_LAVF_AVCTX
897  if ((!codec_framerate.den || !codec_framerate.num) && st->codec->time_base.den && st->codec->time_base.num)
898  codec_framerate = av_mul_q(av_inv_q(st->codec->time_base), (AVRational){1, st->codec->ticks_per_frame});
900 #endif
901 
902  *pnum = 0;
903  *pden = 0;
904  switch (st->codecpar->codec_type) {
905  case AVMEDIA_TYPE_VIDEO:
906  if (st->r_frame_rate.num && !pc && s->iformat) {
907  *pnum = st->r_frame_rate.den;
908  *pden = st->r_frame_rate.num;
909  } else if (st->time_base.num * 1000LL > st->time_base.den) {
910  *pnum = st->time_base.num;
911  *pden = st->time_base.den;
912  } else if (codec_framerate.den * 1000LL > codec_framerate.num) {
913  av_assert0(st->internal->avctx->ticks_per_frame);
914  av_reduce(pnum, pden,
915  codec_framerate.den,
916  codec_framerate.num * (int64_t)st->internal->avctx->ticks_per_frame,
917  INT_MAX);
918 
919  if (pc && pc->repeat_pict) {
920  av_assert0(s->iformat); // this may be wrong for interlaced encoding but its not used for that case
921  av_reduce(pnum, pden,
922  (*pnum) * (1LL + pc->repeat_pict),
923  (*pden),
924  INT_MAX);
925  }
926  /* If this codec can be interlaced or progressive then we need
927  * a parser to compute duration of a packet. Thus if we have
928  * no parser in such case leave duration undefined. */
929  if (st->internal->avctx->ticks_per_frame > 1 && !pc)
930  *pnum = *pden = 0;
931  }
932  break;
933  case AVMEDIA_TYPE_AUDIO:
934  if (st->internal->avctx_inited) {
935  frame_size = av_get_audio_frame_duration(st->internal->avctx, pkt->size);
936  sample_rate = st->internal->avctx->sample_rate;
937  } else {
938  frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
939  sample_rate = st->codecpar->sample_rate;
940  }
941  if (frame_size <= 0 || sample_rate <= 0)
942  break;
943  *pnum = frame_size;
944  *pden = sample_rate;
945  break;
946  default:
947  break;
948  }
949 }
950 
951 static int is_intra_only(enum AVCodecID id)
952 {
954  if (!d)
955  return 0;
957  return 0;
958  return 1;
959 }
960 
961 static int has_decode_delay_been_guessed(AVStream *st)
962 {
963  if (st->codecpar->codec_id != AV_CODEC_ID_H264) return 1;
964  if (!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
965  return 1;
966 #if CONFIG_H264_DECODER
967  if (st->internal->avctx->has_b_frames &&
969  return 1;
970 #endif
971  if (st->internal->avctx->has_b_frames<3)
972  return st->nb_decoded_frames >= 7;
973  else if (st->internal->avctx->has_b_frames<4)
974  return st->nb_decoded_frames >= 18;
975  else
976  return st->nb_decoded_frames >= 20;
977 }
978 
979 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
980 {
981  if (pktl->next)
982  return pktl->next;
983  if (pktl == s->internal->packet_buffer_end)
984  return s->internal->parse_queue;
985  return NULL;
986 }
987 
988 static int64_t select_from_pts_buffer(AVStream *st, int64_t *pts_buffer, int64_t dts) {
989  int onein_oneout = st->codecpar->codec_id != AV_CODEC_ID_H264 &&
991 
992  if(!onein_oneout) {
993  int delay = st->internal->avctx->has_b_frames;
994  int i;
995 
996  if (dts == AV_NOPTS_VALUE) {
997  int64_t best_score = INT64_MAX;
998  for (i = 0; i<delay; i++) {
999  if (st->pts_reorder_error_count[i]) {
1000  int64_t score = st->pts_reorder_error[i] / st->pts_reorder_error_count[i];
1001  if (score < best_score) {
1002  best_score = score;
1003  dts = pts_buffer[i];
1004  }
1005  }
1006  }
1007  } else {
1008  for (i = 0; i<delay; i++) {
1009  if (pts_buffer[i] != AV_NOPTS_VALUE) {
1010  int64_t diff = FFABS(pts_buffer[i] - dts)
1011  + (uint64_t)st->pts_reorder_error[i];
1012  diff = FFMAX(diff, st->pts_reorder_error[i]);
1013  st->pts_reorder_error[i] = diff;
1014  st->pts_reorder_error_count[i]++;
1015  if (st->pts_reorder_error_count[i] > 250) {
1016  st->pts_reorder_error[i] >>= 1;
1017  st->pts_reorder_error_count[i] >>= 1;
1018  }
1019  }
1020  }
1021  }
1022  }
1023 
1024  if (dts == AV_NOPTS_VALUE)
1025  dts = pts_buffer[0];
1026 
1027  return dts;
1028 }
1029 
1030 /**
1031  * Updates the dts of packets of a stream in pkt_buffer, by re-ordering the pts
1032  * of the packets in a window.
1033  */
1034 static void update_dts_from_pts(AVFormatContext *s, int stream_index,
1035  AVPacketList *pkt_buffer)
1036 {
1037  AVStream *st = s->streams[stream_index];
1038  int delay = st->internal->avctx->has_b_frames;
1039  int i;
1040 
1041  int64_t pts_buffer[MAX_REORDER_DELAY+1];
1042 
1043  for (i = 0; i<MAX_REORDER_DELAY+1; i++)
1044  pts_buffer[i] = AV_NOPTS_VALUE;
1045 
1046  for (; pkt_buffer; pkt_buffer = get_next_pkt(s, st, pkt_buffer)) {
1047  if (pkt_buffer->pkt.stream_index != stream_index)
1048  continue;
1049 
1050  if (pkt_buffer->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
1051  pts_buffer[0] = pkt_buffer->pkt.pts;
1052  for (i = 0; i<delay && pts_buffer[i] > pts_buffer[i + 1]; i++)
1053  FFSWAP(int64_t, pts_buffer[i], pts_buffer[i + 1]);
1054 
1055  pkt_buffer->pkt.dts = select_from_pts_buffer(st, pts_buffer, pkt_buffer->pkt.dts);
1056  }
1057  }
1058 }
1059 
1060 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
1061  int64_t dts, int64_t pts, AVPacket *pkt)
1062 {
1063  AVStream *st = s->streams[stream_index];
1065  AVPacketList *pktl_it;
1066 
1067  uint64_t shift;
1068 
1069  if (st->first_dts != AV_NOPTS_VALUE ||
1070  dts == AV_NOPTS_VALUE ||
1071  st->cur_dts == AV_NOPTS_VALUE ||
1072  is_relative(dts))
1073  return;
1074 
1075  st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE);
1076  st->cur_dts = dts;
1077  shift = (uint64_t)st->first_dts - RELATIVE_TS_BASE;
1078 
1079  if (is_relative(pts))
1080  pts += shift;
1081 
1082  for (pktl_it = pktl; pktl_it; pktl_it = get_next_pkt(s, st, pktl_it)) {
1083  if (pktl_it->pkt.stream_index != stream_index)
1084  continue;
1085  if (is_relative(pktl_it->pkt.pts))
1086  pktl_it->pkt.pts += shift;
1087 
1088  if (is_relative(pktl_it->pkt.dts))
1089  pktl_it->pkt.dts += shift;
1090 
1091  if (st->start_time == AV_NOPTS_VALUE && pktl_it->pkt.pts != AV_NOPTS_VALUE) {
1092  st->start_time = pktl_it->pkt.pts;
1095  }
1096  }
1097 
1099  update_dts_from_pts(s, stream_index, pktl);
1100  }
1101 
1102  if (st->start_time == AV_NOPTS_VALUE) {
1103  st->start_time = pts;
1104  if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && st->codecpar->sample_rate)
1105  st->start_time += av_rescale_q(st->skip_samples, (AVRational){1, st->codecpar->sample_rate}, st->time_base);
1106  }
1107 }
1108 
1109 static void update_initial_durations(AVFormatContext *s, AVStream *st,
1110  int stream_index, int duration)
1111 {
1113  int64_t cur_dts = RELATIVE_TS_BASE;
1114 
1115  if (st->first_dts != AV_NOPTS_VALUE) {
1117  return;
1119  cur_dts = st->first_dts;
1120  for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
1121  if (pktl->pkt.stream_index == stream_index) {
1122  if (pktl->pkt.pts != pktl->pkt.dts ||
1123  pktl->pkt.dts != AV_NOPTS_VALUE ||
1124  pktl->pkt.duration)
1125  break;
1126  cur_dts -= duration;
1127  }
1128  }
1129  if (pktl && pktl->pkt.dts != st->first_dts) {
1130  av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %"PRId64") in the queue\n",
1131  av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
1132  return;
1133  }
1134  if (!pktl) {
1135  av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1136  return;
1137  }
1139  st->first_dts = cur_dts;
1140  } else if (st->cur_dts != RELATIVE_TS_BASE)
1141  return;
1142 
1143  for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
1144  if (pktl->pkt.stream_index != stream_index)
1145  continue;
1146  if (pktl->pkt.pts == pktl->pkt.dts &&
1147  (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts) &&
1148  !pktl->pkt.duration) {
1149  pktl->pkt.dts = cur_dts;
1150  if (!st->internal->avctx->has_b_frames)
1151  pktl->pkt.pts = cur_dts;
1152 // if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
1153  pktl->pkt.duration = duration;
1154  } else
1155  break;
1156  cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1157  }
1158  if (!pktl)
1159  st->cur_dts = cur_dts;
1160 }
1161 
1162 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1164  int64_t next_dts, int64_t next_pts)
1165 {
1166  int num, den, presentation_delayed, delay, i;
1167  int64_t offset;
1169  int onein_oneout = st->codecpar->codec_id != AV_CODEC_ID_H264 &&
1171 
1172  if (s->flags & AVFMT_FLAG_NOFILLIN)
1173  return;
1174 
1175  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && pkt->dts != AV_NOPTS_VALUE) {
1176  if (pkt->dts == pkt->pts && st->last_dts_for_order_check != AV_NOPTS_VALUE) {
1177  if (st->last_dts_for_order_check <= pkt->dts) {
1178  st->dts_ordered++;
1179  } else {
1181  "DTS %"PRIi64" < %"PRIi64" out of order\n",
1182  pkt->dts,
1184  st->dts_misordered++;
1185  }
1186  if (st->dts_ordered + st->dts_misordered > 250) {
1187  st->dts_ordered >>= 1;
1188  st->dts_misordered >>= 1;
1189  }
1190  }
1191 
1192  st->last_dts_for_order_check = pkt->dts;
1193  if (st->dts_ordered < 8*st->dts_misordered && pkt->dts == pkt->pts)
1194  pkt->dts = AV_NOPTS_VALUE;
1195  }
1196 
1197  if ((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1198  pkt->dts = AV_NOPTS_VALUE;
1199 
1200  if (pc && pc->pict_type == AV_PICTURE_TYPE_B
1201  && !st->internal->avctx->has_b_frames)
1202  //FIXME Set low_delay = 0 when has_b_frames = 1
1203  st->internal->avctx->has_b_frames = 1;
1204 
1205  /* do we have a video B-frame ? */
1206  delay = st->internal->avctx->has_b_frames;
1207  presentation_delayed = 0;
1208 
1209  /* XXX: need has_b_frame, but cannot get it if the codec is
1210  * not initialized */
1211  if (delay &&
1212  pc && pc->pict_type != AV_PICTURE_TYPE_B)
1213  presentation_delayed = 1;
1214 
1215  if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1216  st->pts_wrap_bits < 63 &&
1217  pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1218  if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
1219  pkt->dts -= 1LL << st->pts_wrap_bits;
1220  } else
1221  pkt->pts += 1LL << st->pts_wrap_bits;
1222  }
1223 
1224  /* Some MPEG-2 in MPEG-PS lack dts (issue #171 / input_file.mpg).
1225  * We take the conservative approach and discard both.
1226  * Note: If this is misbehaving for an H.264 file, then possibly
1227  * presentation_delayed is not set correctly. */
1228  if (delay == 1 && pkt->dts == pkt->pts &&
1229  pkt->dts != AV_NOPTS_VALUE && presentation_delayed) {
1230  av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1231  if ( strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")
1232  && strcmp(s->iformat->name, "flv")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1233  pkt->dts = AV_NOPTS_VALUE;
1234  }
1235 
1236  duration = av_mul_q((AVRational) {pkt->duration, 1}, st->time_base);
1237  if (pkt->duration == 0) {
1238  ff_compute_frame_duration(s, &num, &den, st, pc, pkt);
1239  if (den && num) {
1240  duration = (AVRational) {num, den};
1241  pkt->duration = av_rescale_rnd(1,
1242  num * (int64_t) st->time_base.den,
1243  den * (int64_t) st->time_base.num,
1244  AV_ROUND_DOWN);
1245  }
1246  }
1247 
1248  if (pkt->duration != 0 && (s->internal->packet_buffer || s->internal->parse_queue))
1249  update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1250 
1251  /* Correct timestamps with byte offset if demuxers only have timestamps
1252  * on packet boundaries */
1253  if (pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size) {
1254  /* this will estimate bitrate based on this frame's duration and size */
1255  offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1256  if (pkt->pts != AV_NOPTS_VALUE)
1257  pkt->pts += offset;
1258  if (pkt->dts != AV_NOPTS_VALUE)
1259  pkt->dts += offset;
1260  }
1261 
1262  /* This may be redundant, but it should not hurt. */
1263  if (pkt->dts != AV_NOPTS_VALUE &&
1264  pkt->pts != AV_NOPTS_VALUE &&
1265  pkt->pts > pkt->dts)
1266  presentation_delayed = 1;
1267 
1268  if (s->debug & FF_FDEBUG_TS)
1269  av_log(s, AV_LOG_TRACE,
1270  "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%"PRId64" delay:%d onein_oneout:%d\n",
1271  presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
1272  pkt->stream_index, pc, pkt->duration, delay, onein_oneout);
1273 
1274  /* Interpolate PTS and DTS if they are not present. We skip H264
1275  * currently because delay and has_b_frames are not reliably set. */
1276  if ((delay == 0 || (delay == 1 && pc)) &&
1277  onein_oneout) {
1278  if (presentation_delayed) {
1279  /* DTS = decompression timestamp */
1280  /* PTS = presentation timestamp */
1281  if (pkt->dts == AV_NOPTS_VALUE)
1282  pkt->dts = st->last_IP_pts;
1283  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1284  if (pkt->dts == AV_NOPTS_VALUE)
1285  pkt->dts = st->cur_dts;
1286 
1287  /* This is tricky: the dts must be incremented by the duration
1288  * of the frame we are displaying, i.e. the last I- or P-frame. */
1289  if (st->last_IP_duration == 0)
1290  st->last_IP_duration = pkt->duration;
1291  if (pkt->dts != AV_NOPTS_VALUE)
1292  st->cur_dts = pkt->dts + st->last_IP_duration;
1293  if (pkt->dts != AV_NOPTS_VALUE &&
1294  pkt->pts == AV_NOPTS_VALUE &&
1295  st->last_IP_duration > 0 &&
1296  ((uint64_t)st->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
1297  next_dts != next_pts &&
1298  next_pts != AV_NOPTS_VALUE)
1299  pkt->pts = next_dts;
1300 
1301  st->last_IP_duration = pkt->duration;
1302  st->last_IP_pts = pkt->pts;
1303  /* Cannot compute PTS if not present (we can compute it only
1304  * by knowing the future. */
1305  } else if (pkt->pts != AV_NOPTS_VALUE ||
1306  pkt->dts != AV_NOPTS_VALUE ||
1307  pkt->duration ) {
1308 
1309  /* presentation is not delayed : PTS and DTS are the same */
1310  if (pkt->pts == AV_NOPTS_VALUE)
1311  pkt->pts = pkt->dts;
1313  pkt->pts, pkt);
1314  if (pkt->pts == AV_NOPTS_VALUE)
1315  pkt->pts = st->cur_dts;
1316  pkt->dts = pkt->pts;
1317  if (pkt->pts != AV_NOPTS_VALUE)
1318  st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
1319  }
1320  }
1321 
1322  if (pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
1323  st->pts_buffer[0] = pkt->pts;
1324  for (i = 0; i<delay && st->pts_buffer[i] > st->pts_buffer[i + 1]; i++)
1325  FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i + 1]);
1326 
1328  pkt->dts = select_from_pts_buffer(st, st->pts_buffer, pkt->dts);
1329  }
1330  // We skipped it above so we try here.
1331  if (!onein_oneout)
1332  // This should happen on the first packet
1333  update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1334  if (pkt->dts > st->cur_dts)
1335  st->cur_dts = pkt->dts;
1336 
1337  if (s->debug & FF_FDEBUG_TS)
1338  av_log(s, AV_LOG_TRACE, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1339  presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1340 
1341  /* update flags */
1342  if (is_intra_only(st->codecpar->codec_id))
1343  pkt->flags |= AV_PKT_FLAG_KEY;
1344 #if FF_API_CONVERGENCE_DURATION
1346  if (pc)
1349 #endif
1350 }
1351 
1352 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1353 {
1354  while (*pkt_buf) {
1355  AVPacketList *pktl = *pkt_buf;
1356  *pkt_buf = pktl->next;
1357  av_packet_unref(&pktl->pkt);
1358  av_freep(&pktl);
1359  }
1360  *pkt_buf_end = NULL;
1361 }
1362 
1363 /**
1364  * Parse a packet, add all split parts to parse_queue.
1365  *
1366  * @param pkt Packet to parse, NULL when flushing the parser at end of stream.
1367  */
1368 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1369 {
1370  AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1371  AVStream *st = s->streams[stream_index];
1372  uint8_t *data = pkt ? pkt->data : NULL;
1373  int size = pkt ? pkt->size : 0;
1374  int ret = 0, got_output = 0;
1375 
1376  if (!pkt) {
1378  pkt = &flush_pkt;
1379  got_output = 1;
1380  } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1381  // preserve 0-size sync packets
1383  }
1384 
1385  while (size > 0 || (pkt == &flush_pkt && got_output)) {
1386  int len;
1387  int64_t next_pts = pkt->pts;
1388  int64_t next_dts = pkt->dts;
1389 
1390  av_init_packet(&out_pkt);
1391  len = av_parser_parse2(st->parser, st->internal->avctx,
1392  &out_pkt.data, &out_pkt.size, data, size,
1393  pkt->pts, pkt->dts, pkt->pos);
1394 
1395  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1396  pkt->pos = -1;
1397  /* increment read pointer */
1398  data += len;
1399  size -= len;
1400 
1401  got_output = !!out_pkt.size;
1402 
1403  if (!out_pkt.size)
1404  continue;
1405 
1406  if (pkt->side_data) {
1407  out_pkt.side_data = pkt->side_data;
1408  out_pkt.side_data_elems = pkt->side_data_elems;
1409  pkt->side_data = NULL;
1410  pkt->side_data_elems = 0;
1411  }
1412 
1413  /* set the duration */
1414  out_pkt.duration = (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
1416  if (st->internal->avctx->sample_rate > 0) {
1417  out_pkt.duration =
1419  (AVRational) { 1, st->internal->avctx->sample_rate },
1420  st->time_base,
1421  AV_ROUND_DOWN);
1422  }
1423  }
1424 
1425  out_pkt.stream_index = st->index;
1426  out_pkt.pts = st->parser->pts;
1427  out_pkt.dts = st->parser->dts;
1428  out_pkt.pos = st->parser->pos;
1429 
1431  out_pkt.pos = st->parser->frame_offset;
1432 
1433  if (st->parser->key_frame == 1 ||
1434  (st->parser->key_frame == -1 &&
1436  out_pkt.flags |= AV_PKT_FLAG_KEY;
1437 
1438  if (st->parser->key_frame == -1 && st->parser->pict_type ==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1439  out_pkt.flags |= AV_PKT_FLAG_KEY;
1440 
1441  compute_pkt_fields(s, st, st->parser, &out_pkt, next_dts, next_pts);
1442 
1443  ret = add_to_pktbuf(&s->internal->parse_queue, &out_pkt,
1444  &s->internal->parse_queue_end, 1);
1445  av_packet_unref(&out_pkt);
1446  if (ret < 0)
1447  goto fail;
1448  }
1449 
1450  /* end of the stream => close and free the parser */
1451  if (pkt == &flush_pkt) {
1452  av_parser_close(st->parser);
1453  st->parser = NULL;
1454  }
1455 
1456 fail:
1458  return ret;
1459 }
1460 
1461 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1462  AVPacketList **pkt_buffer_end,
1463  AVPacket *pkt)
1464 {
1465  AVPacketList *pktl;
1466  av_assert0(*pkt_buffer);
1467  pktl = *pkt_buffer;
1468  *pkt = pktl->pkt;
1469  *pkt_buffer = pktl->next;
1470  if (!pktl->next)
1471  *pkt_buffer_end = NULL;
1472  av_freep(&pktl);
1473  return 0;
1474 }
1475 
1476 static int64_t ts_to_samples(AVStream *st, int64_t ts)
1477 {
1478  return av_rescale(ts, st->time_base.num * st->codecpar->sample_rate, st->time_base.den);
1479 }
1480 
1481 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1482 {
1483  int ret = 0, i, got_packet = 0;
1484  AVDictionary *metadata = NULL;
1485 
1486  av_init_packet(pkt);
1487 
1488  while (!got_packet && !s->internal->parse_queue) {
1489  AVStream *st;
1490  AVPacket cur_pkt;
1491 
1492  /* read next packet */
1493  ret = ff_read_packet(s, &cur_pkt);
1494  if (ret < 0) {
1495  if (ret == AVERROR(EAGAIN))
1496  return ret;
1497  /* flush the parsers */
1498  for (i = 0; i < s->nb_streams; i++) {
1499  st = s->streams[i];
1500  if (st->parser && st->need_parsing)
1501  parse_packet(s, NULL, st->index);
1502  }
1503  /* all remaining packets are now in parse_queue =>
1504  * really terminate parsing */
1505  break;
1506  }
1507  ret = 0;
1508  st = s->streams[cur_pkt.stream_index];
1509 
1510  /* update context if required */
1511  if (st->internal->need_context_update) {
1512  if (avcodec_is_open(st->internal->avctx)) {
1513  av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
1515  st->info->found_decoder = 0;
1516  }
1517 
1519  if (ret < 0)
1520  return ret;
1521 
1522 #if FF_API_LAVF_AVCTX
1524  /* update deprecated public codec context */
1525  ret = avcodec_parameters_to_context(st->codec, st->codecpar);
1526  if (ret < 0)
1527  return ret;
1529 #endif
1530 
1531  st->internal->need_context_update = 0;
1532  }
1533 
1534  if (cur_pkt.pts != AV_NOPTS_VALUE &&
1535  cur_pkt.dts != AV_NOPTS_VALUE &&
1536  cur_pkt.pts < cur_pkt.dts) {
1538  "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1539  cur_pkt.stream_index,
1540  av_ts2str(cur_pkt.pts),
1541  av_ts2str(cur_pkt.dts),
1542  cur_pkt.size);
1543  }
1544  if (s->debug & FF_FDEBUG_TS)
1545  av_log(s, AV_LOG_DEBUG,
1546  "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
1547  cur_pkt.stream_index,
1548  av_ts2str(cur_pkt.pts),
1549  av_ts2str(cur_pkt.dts),
1550  cur_pkt.size, cur_pkt.duration, cur_pkt.flags);
1551 
1552  if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1553  st->parser = av_parser_init(st->codecpar->codec_id);
1554  if (!st->parser) {
1555  av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1556  "%s, packets or times may be invalid.\n",
1558  /* no parser available: just output the raw packets */
1560  } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
1562  else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
1563  st->parser->flags |= PARSER_FLAG_ONCE;
1564  else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1566  }
1567 
1568  if (!st->need_parsing || !st->parser) {
1569  /* no parsing needed: we just output the packet as is */
1570  *pkt = cur_pkt;
1572  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1573  (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1574  ff_reduce_index(s, st->index);
1575  av_add_index_entry(st, pkt->pos, pkt->dts,
1576  0, 0, AVINDEX_KEYFRAME);
1577  }
1578  got_packet = 1;
1579  } else if (st->discard < AVDISCARD_ALL) {
1580  if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1581  return ret;
1583  st->codecpar->bit_rate = st->internal->avctx->bit_rate;
1584  st->codecpar->channels = st->internal->avctx->channels;
1586  st->codecpar->codec_id = st->internal->avctx->codec_id;
1587  } else {
1588  /* free packet */
1589  av_packet_unref(&cur_pkt);
1590  }
1591  if (pkt->flags & AV_PKT_FLAG_KEY)
1592  st->skip_to_keyframe = 0;
1593  if (st->skip_to_keyframe) {
1594  av_packet_unref(&cur_pkt);
1595  if (got_packet) {
1596  *pkt = cur_pkt;
1597  }
1598  got_packet = 0;
1599  }
1600  }
1601 
1602  if (!got_packet && s->internal->parse_queue)
1604 
1605  if (ret >= 0) {
1606  AVStream *st = s->streams[pkt->stream_index];
1607  int discard_padding = 0;
1608  if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
1609  int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
1610  int64_t sample = ts_to_samples(st, pts);
1611  int duration = ts_to_samples(st, pkt->duration);
1612  int64_t end_sample = sample + duration;
1613  if (duration > 0 && end_sample >= st->first_discard_sample &&
1614  sample < st->last_discard_sample)
1615  discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
1616  }
1617  if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
1618  st->skip_samples = st->start_skip_samples;
1619  if (st->skip_samples || discard_padding) {
1621  if (p) {
1622  AV_WL32(p, st->skip_samples);
1623  AV_WL32(p + 4, discard_padding);
1624  av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
1625  }
1626  st->skip_samples = 0;
1627  }
1628 
1629  if (st->inject_global_side_data) {
1630  for (i = 0; i < st->nb_side_data; i++) {
1631  AVPacketSideData *src_sd = &st->side_data[i];
1632  uint8_t *dst_data;
1633 
1634  if (av_packet_get_side_data(pkt, src_sd->type, NULL))
1635  continue;
1636 
1637  dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
1638  if (!dst_data) {
1639  av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
1640  continue;
1641  }
1642 
1643  memcpy(dst_data, src_sd->data, src_sd->size);
1644  }
1645  st->inject_global_side_data = 0;
1646  }
1647 
1648  if (!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
1650  }
1651 
1652  av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
1653  if (metadata) {
1655  av_dict_copy(&s->metadata, metadata, 0);
1656  av_dict_free(&metadata);
1658  }
1659 
1660 #if FF_API_LAVF_AVCTX
1662 #endif
1663 
1664  if (s->debug & FF_FDEBUG_TS)
1665  av_log(s, AV_LOG_DEBUG,
1666  "read_frame_internal stream=%d, pts=%s, dts=%s, "
1667  "size=%d, duration=%"PRId64", flags=%d\n",
1668  pkt->stream_index,
1669  av_ts2str(pkt->pts),
1670  av_ts2str(pkt->dts),
1671  pkt->size, pkt->duration, pkt->flags);
1672 
1673  return ret;
1674 }
1675 
1676 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1677 {
1678  const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1679  int eof = 0;
1680  int ret;
1681  AVStream *st;
1682 
1683  if (!genpts) {
1684  ret = s->internal->packet_buffer
1686  &s->internal->packet_buffer_end, pkt)
1687  : read_frame_internal(s, pkt);
1688  if (ret < 0)
1689  return ret;
1690  goto return_packet;
1691  }
1692 
1693  for (;;) {
1694  AVPacketList *pktl = s->internal->packet_buffer;
1695 
1696  if (pktl) {
1697  AVPacket *next_pkt = &pktl->pkt;
1698 
1699  if (next_pkt->dts != AV_NOPTS_VALUE) {
1700  int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1701  // last dts seen for this stream. if any of packets following
1702  // current one had no dts, we will set this to AV_NOPTS_VALUE.
1703  int64_t last_dts = next_pkt->dts;
1704  while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1705  if (pktl->pkt.stream_index == next_pkt->stream_index &&
1706  (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1707  if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) {
1708  // not B-frame
1709  next_pkt->pts = pktl->pkt.dts;
1710  }
1711  if (last_dts != AV_NOPTS_VALUE) {
1712  // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1713  last_dts = pktl->pkt.dts;
1714  }
1715  }
1716  pktl = pktl->next;
1717  }
1718  if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1719  // Fixing the last reference frame had none pts issue (For MXF etc).
1720  // We only do this when
1721  // 1. eof.
1722  // 2. we are not able to resolve a pts value for current packet.
1723  // 3. the packets for this stream at the end of the files had valid dts.
1724  next_pkt->pts = last_dts + next_pkt->duration;
1725  }
1726  pktl = s->internal->packet_buffer;
1727  }
1728 
1729  /* read packet from packet buffer, if there is data */
1730  st = s->streams[next_pkt->stream_index];
1731  if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
1732  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1734  &s->internal->packet_buffer_end, pkt);
1735  goto return_packet;
1736  }
1737  }
1738 
1739  ret = read_frame_internal(s, pkt);
1740  if (ret < 0) {
1741  if (pktl && ret != AVERROR(EAGAIN)) {
1742  eof = 1;
1743  continue;
1744  } else
1745  return ret;
1746  }
1747 
1748  ret = add_to_pktbuf(&s->internal->packet_buffer, pkt,
1749  &s->internal->packet_buffer_end, 1);
1750  av_packet_unref(pkt);
1751  if (ret < 0)
1752  return ret;
1753  }
1754 
1755 return_packet:
1756 
1757  st = s->streams[pkt->stream_index];
1758  if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1759  ff_reduce_index(s, st->index);
1760  av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1761  }
1762 
1763  if (is_relative(pkt->dts))
1764  pkt->dts -= RELATIVE_TS_BASE;
1765  if (is_relative(pkt->pts))
1766  pkt->pts -= RELATIVE_TS_BASE;
1767 
1768  return ret;
1769 }
1770 
1771 /* XXX: suppress the packet queue */
1772 static void flush_packet_queue(AVFormatContext *s)
1773 {
1774  if (!s->internal)
1775  return;
1779 
1781 }
1782 
1783 /*******************************************************/
1784 /* seek support */
1785 
1786 int av_find_default_stream_index(AVFormatContext *s)
1787 {
1788  int i;
1789  AVStream *st;
1790  int best_stream = 0;
1791  int best_score = INT_MIN;
1792 
1793  if (s->nb_streams <= 0)
1794  return -1;
1795  for (i = 0; i < s->nb_streams; i++) {
1796  int score = 0;
1797  st = s->streams[i];
1798  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
1800  score -= 400;
1801  if (st->codecpar->width && st->codecpar->height)
1802  score += 50;
1803  score+= 25;
1804  }
1805  if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
1806  if (st->codecpar->sample_rate)
1807  score += 50;
1808  }
1809  if (st->codec_info_nb_frames)
1810  score += 12;
1811 
1812  if (st->discard != AVDISCARD_ALL)
1813  score += 200;
1814 
1815  if (score > best_score) {
1816  best_score = score;
1817  best_stream = i;
1818  }
1819  }
1820  return best_stream;
1821 }
1822 
1823 /** Flush the frame reader. */
1824 void ff_read_frame_flush(AVFormatContext *s)
1825 {
1826  AVStream *st;
1827  int i, j;
1828 
1829  flush_packet_queue(s);
1830 
1831  /* Reset read state for each stream. */
1832  for (i = 0; i < s->nb_streams; i++) {
1833  st = s->streams[i];
1834 
1835  if (st->parser) {
1836  av_parser_close(st->parser);
1837  st->parser = NULL;
1838  }
1841  if (st->first_dts == AV_NOPTS_VALUE)
1842  st->cur_dts = RELATIVE_TS_BASE;
1843  else
1844  /* We set the current DTS to an unspecified origin. */
1845  st->cur_dts = AV_NOPTS_VALUE;
1846 
1848 
1849  for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
1850  st->pts_buffer[j] = AV_NOPTS_VALUE;
1851 
1853  st->inject_global_side_data = 1;
1854 
1855  st->skip_samples = 0;
1856  }
1857 }
1858 
1859 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1860 {
1861  int i;
1862 
1863  for (i = 0; i < s->nb_streams; i++) {
1864  AVStream *st = s->streams[i];
1865 
1866  st->cur_dts =
1867  av_rescale(timestamp,
1868  st->time_base.den * (int64_t) ref_st->time_base.num,
1869  st->time_base.num * (int64_t) ref_st->time_base.den);
1870  }
1871 }
1872 
1873 void ff_reduce_index(AVFormatContext *s, int stream_index)
1874 {
1875  AVStream *st = s->streams[stream_index];
1876  unsigned int max_entries = s->max_index_size / sizeof(AVIndexEntry);
1877 
1878  if ((unsigned) st->nb_index_entries >= max_entries) {
1879  int i;
1880  for (i = 0; 2 * i < st->nb_index_entries; i++)
1881  st->index_entries[i] = st->index_entries[2 * i];
1882  st->nb_index_entries = i;
1883  }
1884 }
1885 
1886 int ff_add_index_entry(AVIndexEntry **index_entries,
1887  int *nb_index_entries,
1888  unsigned int *index_entries_allocated_size,
1889  int64_t pos, int64_t timestamp,
1890  int size, int distance, int flags)
1891 {
1892  AVIndexEntry *entries, *ie;
1893  int index;
1894 
1895  if ((unsigned) *nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1896  return -1;
1897 
1898  if (timestamp == AV_NOPTS_VALUE)
1899  return AVERROR(EINVAL);
1900 
1901  if (size < 0 || size > 0x3FFFFFFF)
1902  return AVERROR(EINVAL);
1903 
1904  if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1905  timestamp -= RELATIVE_TS_BASE;
1906 
1907  entries = av_fast_realloc(*index_entries,
1908  index_entries_allocated_size,
1909  (*nb_index_entries + 1) *
1910  sizeof(AVIndexEntry));
1911  if (!entries)
1912  return -1;
1913 
1914  *index_entries = entries;
1915 
1916  index = ff_index_search_timestamp(*index_entries, *nb_index_entries,
1917  timestamp, AVSEEK_FLAG_ANY);
1918 
1919  if (index < 0) {
1920  index = (*nb_index_entries)++;
1921  ie = &entries[index];
1922  av_assert0(index == 0 || ie[-1].timestamp < timestamp);
1923  } else {
1924  ie = &entries[index];
1925  if (ie->timestamp != timestamp) {
1926  if (ie->timestamp <= timestamp)
1927  return -1;
1928  memmove(entries + index + 1, entries + index,
1929  sizeof(AVIndexEntry) * (*nb_index_entries - index));
1930  (*nb_index_entries)++;
1931  } else if (ie->pos == pos && distance < ie->min_distance)
1932  // do not reduce the distance
1933  distance = ie->min_distance;
1934  }
1935 
1936  ie->pos = pos;
1937  ie->timestamp = timestamp;
1938  ie->min_distance = distance;
1939  ie->size = size;
1940  ie->flags = flags;
1941 
1942  return index;
1943 }
1944 
1945 int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
1946  int size, int distance, int flags)
1947 {
1948  timestamp = wrap_timestamp(st, timestamp);
1950  &st->index_entries_allocated_size, pos,
1951  timestamp, size, distance, flags);
1952 }
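/*
 * Usage sketch (illustrative, not part of utils.c): a demuxer that knows
 * the byte offset and timestamp of each keyframe can register it so that
 * later seeks can be resolved through av_index_search_timestamp().
 * "st", "pkt_pos", "pkt_ts" and "pkt_size" are hypothetical values taken
 * from the demuxer's own parsing.
 */
static void example_index_keyframe(AVStream *st, int64_t pkt_pos,
                                   int64_t pkt_ts, int pkt_size)
{
    /* distance 0: the indexed position itself starts on a keyframe */
    av_add_index_entry(st, pkt_pos, pkt_ts, pkt_size, 0, AVINDEX_KEYFRAME);
}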
1953 
1954 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1955  int64_t wanted_timestamp, int flags)
1956 {
1957  int a, b, m;
1958  int64_t timestamp;
1959 
1960  a = -1;
1961  b = nb_entries;
1962 
1963  // Optimize appending index entries at the end.
1964  if (b && entries[b - 1].timestamp < wanted_timestamp)
1965  a = b - 1;
1966 
1967  while (b - a > 1) {
1968  m = (a + b) >> 1;
1969 
1970  // Search for the next non-discarded packet.
1971  while ((entries[m].flags & AVINDEX_DISCARD_FRAME) && m < b) {
1972  m++;
1973  if (m == b && entries[m].timestamp >= wanted_timestamp) {
1974  m = b - 1;
1975  break;
1976  }
1977  }
1978 
1979  timestamp = entries[m].timestamp;
1980  if (timestamp >= wanted_timestamp)
1981  b = m;
1982  if (timestamp <= wanted_timestamp)
1983  a = m;
1984  }
1985  m = (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1986 
1987  if (!(flags & AVSEEK_FLAG_ANY))
1988  while (m >= 0 && m < nb_entries &&
1989  !(entries[m].flags & AVINDEX_KEYFRAME))
1990  m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
1991 
1992  if (m == nb_entries)
1993  return -1;
1994  return m;
1995 }
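/*
 * Worked example (illustrative): with three keyframe entries whose
 * timestamps are {0, 1000, 2000}, a search for 1500 returns index 1
 * (timestamp 1000) when AVSEEK_FLAG_BACKWARD is set and index 2
 * (timestamp 2000) otherwise. Unless AVSEEK_FLAG_ANY is given, the result
 * is then walked backward/forward until it lands on an AVINDEX_KEYFRAME
 * entry, and -1 is returned if none exists in that direction.
 */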
1996 
1997 void ff_configure_buffers_for_index(AVFormatContext *s, int64_t time_tolerance)
1998 {
1999  int ist1, ist2;
2000  int64_t pos_delta = 0;
2001  int64_t skip = 0;
2002  // We could use URLProtocol flags here, but since many user applications do not use URLProtocols this would be unreliable.
2003  const char *proto = avio_find_protocol_name(s->filename);
2004 
2005  if (!proto) {
2006  av_log(s, AV_LOG_INFO,
2007  "Protocol name not provided, cannot determine if input is local or "
2008  "a network protocol, buffers and access patterns cannot be configured "
2009  "optimally without knowing the protocol\n");
2010  }
2011 
2012  if (proto && !(strcmp(proto, "file") && strcmp(proto, "pipe") && strcmp(proto, "cache")))
2013  return;
2014 
2015  for (ist1 = 0; ist1 < s->nb_streams; ist1++) {
2016  AVStream *st1 = s->streams[ist1];
2017  for (ist2 = 0; ist2 < s->nb_streams; ist2++) {
2018  AVStream *st2 = s->streams[ist2];
2019  int i1, i2;
2020 
2021  if (ist1 == ist2)
2022  continue;
2023 
2024  for (i1 = i2 = 0; i1 < st1->nb_index_entries; i1++) {
2025  AVIndexEntry *e1 = &st1->index_entries[i1];
2026  int64_t e1_pts = av_rescale_q(e1->timestamp, st1->time_base, AV_TIME_BASE_Q);
2027 
2028  skip = FFMAX(skip, e1->size);
2029  for (; i2 < st2->nb_index_entries; i2++) {
2030  AVIndexEntry *e2 = &st2->index_entries[i2];
2031  int64_t e2_pts = av_rescale_q(e2->timestamp, st2->time_base, AV_TIME_BASE_Q);
2032  if (e2_pts - e1_pts < time_tolerance)
2033  continue;
2034  pos_delta = FFMAX(pos_delta, e1->pos - e2->pos);
2035  break;
2036  }
2037  }
2038  }
2039  }
2040 
2041  pos_delta *= 2;
2042  /* XXX This could be adjusted depending on protocol */
2043  if (s->pb->buffer_size < pos_delta && pos_delta < (1<<24)) {
2044  av_log(s, AV_LOG_VERBOSE, "Reconfiguring buffers to size %"PRId64"\n", pos_delta);
2045  ffio_set_buf_size(s->pb, pos_delta);
2046  s->pb->short_seek_threshold = FFMAX(s->pb->short_seek_threshold, pos_delta/2);
2047  }
2048 
2049  if (skip < (1<<23)) {
2051  }
2052 }
2053 
2054 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp, int flags)
2055 {
2057  wanted_timestamp, flags);
2058 }
2059 
2060 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
2061  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
2062 {
2063  int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
2064  if (stream_index >= 0)
2065  ts = wrap_timestamp(s->streams[stream_index], ts);
2066  return ts;
2067 }
2068 
2069 int ff_seek_frame_binary(AVFormatContext *s, int stream_index,
2070  int64_t target_ts, int flags)
2071 {
2072  AVInputFormat *avif = s->iformat;
2073  int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
2074  int64_t ts_min, ts_max, ts;
2075  int index;
2076  int64_t ret;
2077  AVStream *st;
2078 
2079  if (stream_index < 0)
2080  return -1;
2081 
2082  av_log(s, AV_LOG_TRACE, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
2083 
2084  ts_max =
2085  ts_min = AV_NOPTS_VALUE;
2086  pos_limit = -1; // GCC falsely says it may be uninitialized.
2087 
2088  st = s->streams[stream_index];
2089  if (st->index_entries) {
2090  AVIndexEntry *e;
2091 
2092  /* FIXME: Whole function must be checked for non-keyframe entries in
2093  * index case, especially read_timestamp(). */
2094  index = av_index_search_timestamp(st, target_ts,
2095  flags | AVSEEK_FLAG_BACKWARD);
2096  index = FFMAX(index, 0);
2097  e = &st->index_entries[index];
2098 
2099  if (e->timestamp <= target_ts || e->pos == e->min_distance) {
2100  pos_min = e->pos;
2101  ts_min = e->timestamp;
2102  av_log(s, AV_LOG_TRACE, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
2103  pos_min, av_ts2str(ts_min));
2104  } else {
2105  av_assert1(index == 0);
2106  }
2107 
2108  index = av_index_search_timestamp(st, target_ts,
2109  flags & ~AVSEEK_FLAG_BACKWARD);
2110  av_assert0(index < st->nb_index_entries);
2111  if (index >= 0) {
2112  e = &st->index_entries[index];
2113  av_assert1(e->timestamp >= target_ts);
2114  pos_max = e->pos;
2115  ts_max = e->timestamp;
2116  pos_limit = pos_max - e->min_distance;
2117  av_log(s, AV_LOG_TRACE, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64
2118  " dts_max=%s\n", pos_max, pos_limit, av_ts2str(ts_max));
2119  }
2120  }
2121 
2122  pos = ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit,
2123  ts_min, ts_max, flags, &ts, avif->read_timestamp);
2124  if (pos < 0)
2125  return -1;
2126 
2127  /* do the seek */
2128  if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
2129  return ret;
2130 
2132  ff_update_cur_dts(s, st, ts);
2133 
2134  return 0;
2135 }
2136 
2137 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
2138  int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
2139 {
2140  int64_t step = 1024;
2141  int64_t limit, ts_max;
2142  int64_t filesize = avio_size(s->pb);
2143  int64_t pos_max = filesize - 1;
2144  do {
2145  limit = pos_max;
2146  pos_max = FFMAX(0, (pos_max) - step);
2147  ts_max = ff_read_timestamp(s, stream_index,
2148  &pos_max, limit, read_timestamp);
2149  step += step;
2150  } while (ts_max == AV_NOPTS_VALUE && 2*limit > step);
2151  if (ts_max == AV_NOPTS_VALUE)
2152  return -1;
2153 
2154  for (;;) {
2155  int64_t tmp_pos = pos_max + 1;
2156  int64_t tmp_ts = ff_read_timestamp(s, stream_index,
2157  &tmp_pos, INT64_MAX, read_timestamp);
2158  if (tmp_ts == AV_NOPTS_VALUE)
2159  break;
2160  av_assert0(tmp_pos > pos_max);
2161  ts_max = tmp_ts;
2162  pos_max = tmp_pos;
2163  if (tmp_pos >= filesize)
2164  break;
2165  }
2166 
2167  if (ts)
2168  *ts = ts_max;
2169  if (pos)
2170  *pos = pos_max;
2171 
2172  return 0;
2173 }
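/*
 * Worked example (illustrative): the first pass reads timestamps in the
 * byte range [filesize - 1025, filesize - 1], then
 * [filesize - 3073, filesize - 1025], and so on with a doubling step,
 * until a valid DTS appears. The second loop then walks forward packet by
 * packet so that *ts / *pos describe the last readable timestamp in the
 * file.
 */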
2174 
2175 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
2176  int64_t pos_min, int64_t pos_max, int64_t pos_limit,
2177  int64_t ts_min, int64_t ts_max,
2178  int flags, int64_t *ts_ret,
2179  int64_t (*read_timestamp)(struct AVFormatContext *, int,
2180  int64_t *, int64_t))
2181 {
2182  int64_t pos, ts;
2183  int64_t start_pos;
2184  int no_change;
2185  int ret;
2186 
2187  av_log(s, AV_LOG_TRACE, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
2188 
2189  if (ts_min == AV_NOPTS_VALUE) {
2190  pos_min = s->internal->data_offset;
2191  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2192  if (ts_min == AV_NOPTS_VALUE)
2193  return -1;
2194  }
2195 
2196  if (ts_min >= target_ts) {
2197  *ts_ret = ts_min;
2198  return pos_min;
2199  }
2200 
2201  if (ts_max == AV_NOPTS_VALUE) {
2202  if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
2203  return ret;
2204  pos_limit = pos_max;
2205  }
2206 
2207  if (ts_max <= target_ts) {
2208  *ts_ret = ts_max;
2209  return pos_max;
2210  }
2211 
2212  av_assert0(ts_min < ts_max);
2213 
2214  no_change = 0;
2215  while (pos_min < pos_limit) {
2216  av_log(s, AV_LOG_TRACE,
2217  "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
2218  pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
2219  av_assert0(pos_limit <= pos_max);
2220 
2221  if (no_change == 0) {
2222  int64_t approximate_keyframe_distance = pos_max - pos_limit;
2223  // interpolate position (better than dichotomy)
2224  pos = av_rescale(target_ts - ts_min, pos_max - pos_min,
2225  ts_max - ts_min) +
2226  pos_min - approximate_keyframe_distance;
2227  } else if (no_change == 1) {
2228  // bisection if interpolation did not change min / max pos last time
2229  pos = (pos_min + pos_limit) >> 1;
2230  } else {
2231  /* linear search if bisection failed, can only happen if there
2232  * are very few or no keyframes between min/max */
2233  pos = pos_min;
2234  }
2235  if (pos <= pos_min)
2236  pos = pos_min + 1;
2237  else if (pos > pos_limit)
2238  pos = pos_limit;
2239  start_pos = pos;
2240 
2241  // May pass pos_limit instead of -1.
2242  ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp);
2243  if (pos == pos_max)
2244  no_change++;
2245  else
2246  no_change = 0;
2247  av_log(s, AV_LOG_TRACE, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s"
2248  " target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
2249  pos_min, pos, pos_max,
2250  av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
2251  pos_limit, start_pos, no_change);
2252  if (ts == AV_NOPTS_VALUE) {
2253  av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
2254  return -1;
2255  }
2256  if (target_ts <= ts) {
2257  pos_limit = start_pos - 1;
2258  pos_max = pos;
2259  ts_max = ts;
2260  }
2261  if (target_ts >= ts) {
2262  pos_min = pos;
2263  ts_min = ts;
2264  }
2265  }
2266 
2267  pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
2268  ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
2269 #if 0
2270  pos_min = pos;
2271  ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2272  pos_min++;
2273  ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
2274  av_log(s, AV_LOG_TRACE, "pos=0x%"PRIx64" %s<=%s<=%s\n",
2275  pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
2276 #endif
2277  *ts_ret = ts;
2278  return pos;
2279 }
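/*
 * Worked example (illustrative): with pos_min = 0, pos_max = 8000000,
 * ts_min = 0, ts_max = 80000 and target_ts = 20000, the first iteration
 * interpolates
 *     pos = 20000 * 8000000 / 80000 + 0 - (pos_max - pos_limit)
 *         = 2000000 - approximate_keyframe_distance,
 * i.e. a quarter of the way into the byte range, biased backwards by the
 * estimated keyframe spacing so the probe tends to land just before the
 * target rather than just after it.
 */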
2280 
2281 static int seek_frame_byte(AVFormatContext *s, int stream_index,
2282  int64_t pos, int flags)
2283 {
2284  int64_t pos_min, pos_max;
2285 
2286  pos_min = s->internal->data_offset;
2287  pos_max = avio_size(s->pb) - 1;
2288 
2289  if (pos < pos_min)
2290  pos = pos_min;
2291  else if (pos > pos_max)
2292  pos = pos_max;
2293 
2294  avio_seek(s->pb, pos, SEEK_SET);
2295 
2296  s->io_repositioned = 1;
2297 
2298  return 0;
2299 }
2300 
2301 static int seek_frame_generic(AVFormatContext *s, int stream_index,
2302  int64_t timestamp, int flags)
2303 {
2304  int index;
2305  int64_t ret;
2306  AVStream *st;
2307  AVIndexEntry *ie;
2308 
2309  st = s->streams[stream_index];
2310 
2311  index = av_index_search_timestamp(st, timestamp, flags);
2312 
2313  if (index < 0 && st->nb_index_entries &&
2314  timestamp < st->index_entries[0].timestamp)
2315  return -1;
2316 
2317  if (index < 0 || index == st->nb_index_entries - 1) {
2318  AVPacket pkt;
2319  int nonkey = 0;
2320 
2321  if (st->nb_index_entries) {
2323  ie = &st->index_entries[st->nb_index_entries - 1];
2324  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2325  return ret;
2326  ff_update_cur_dts(s, st, ie->timestamp);
2327  } else {
2328  if ((ret = avio_seek(s->pb, s->internal->data_offset, SEEK_SET)) < 0)
2329  return ret;
2330  }
2331  for (;;) {
2332  int read_status;
2333  do {
2334  read_status = av_read_frame(s, &pkt);
2335  } while (read_status == AVERROR(EAGAIN));
2336  if (read_status < 0)
2337  break;
2338  if (stream_index == pkt.stream_index && pkt.dts > timestamp) {
2339  if (pkt.flags & AV_PKT_FLAG_KEY) {
2340  av_packet_unref(&pkt);
2341  break;
2342  }
2343  if (nonkey++ > 1000 && st->codecpar->codec_id != AV_CODEC_ID_CDGRAPHICS) {
2344  av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
2345  av_packet_unref(&pkt);
2346  break;
2347  }
2348  }
2349  av_packet_unref(&pkt);
2350  }
2351  index = av_index_search_timestamp(st, timestamp, flags);
2352  }
2353  if (index < 0)
2354  return -1;
2355 
2357  if (s->iformat->read_seek)
2358  if (s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2359  return 0;
2360  ie = &st->index_entries[index];
2361  if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2362  return ret;
2363  ff_update_cur_dts(s, st, ie->timestamp);
2364 
2365  return 0;
2366 }
2367 
2368 static int seek_frame_internal(AVFormatContext *s, int stream_index,
2369  int64_t timestamp, int flags)
2370 {
2371  int ret;
2372  AVStream *st;
2373 
2374  if (flags & AVSEEK_FLAG_BYTE) {
2375  if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2376  return -1;
2378  return seek_frame_byte(s, stream_index, timestamp, flags);
2379  }
2380 
2381  if (stream_index < 0) {
2382  stream_index = av_find_default_stream_index(s);
2383  if (stream_index < 0)
2384  return -1;
2385 
2386  st = s->streams[stream_index];
2387  /* timestamp for default must be expressed in AV_TIME_BASE units */
2388  timestamp = av_rescale(timestamp, st->time_base.den,
2389  AV_TIME_BASE * (int64_t) st->time_base.num);
2390  }
2391 
2392  /* first, we try the format specific seek */
2393  if (s->iformat->read_seek) {
2395  ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2396  } else
2397  ret = -1;
2398  if (ret >= 0)
2399  return 0;
2400 
2401  if (s->iformat->read_timestamp &&
2402  !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2404  return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2405  } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2407  return seek_frame_generic(s, stream_index, timestamp, flags);
2408  } else
2409  return -1;
2410 }
2411 
2412 int av_seek_frame(AVFormatContext *s, int stream_index,
2413  int64_t timestamp, int flags)
2414 {
2415  int ret;
2416 
2417  if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2418  int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2419  if ((flags & AVSEEK_FLAG_BACKWARD))
2420  max_ts = timestamp;
2421  else
2422  min_ts = timestamp;
2423  return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2424  flags & ~AVSEEK_FLAG_BACKWARD);
2425  }
2426 
2427  ret = seek_frame_internal(s, stream_index, timestamp, flags);
2428 
2429  if (ret >= 0)
2431 
2432  return ret;
2433 }
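/*
 * Usage sketch (illustrative, not part of utils.c): seeking an opened
 * input to a time given in seconds on a specific stream, landing on the
 * preceding keyframe. "fmt_ctx" and "stream_index" are assumed to come
 * from avformat_open_input() / av_find_best_stream().
 */
static int example_seek_seconds(AVFormatContext *fmt_ctx, int stream_index,
                                double seconds)
{
    AVRational tb = fmt_ctx->streams[stream_index]->time_base;
    int64_t ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE),
                              AV_TIME_BASE_Q, tb);
    return av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}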
2434 
2435 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts,
2436  int64_t ts, int64_t max_ts, int flags)
2437 {
2438  if (min_ts > ts || max_ts < ts)
2439  return -1;
2440  if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2441  return AVERROR(EINVAL);
2442 
2443  if (s->seek2any>0)
2444  flags |= AVSEEK_FLAG_ANY;
2445  flags &= ~AVSEEK_FLAG_BACKWARD;
2446 
2447  if (s->iformat->read_seek2) {
2448  int ret;
2450 
2451  if (stream_index == -1 && s->nb_streams == 1) {
2452  AVRational time_base = s->streams[0]->time_base;
2453  ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
2454  min_ts = av_rescale_rnd(min_ts, time_base.den,
2455  time_base.num * (int64_t)AV_TIME_BASE,
2457  max_ts = av_rescale_rnd(max_ts, time_base.den,
2458  time_base.num * (int64_t)AV_TIME_BASE,
2460  stream_index = 0;
2461  }
2462 
2463  ret = s->iformat->read_seek2(s, stream_index, min_ts,
2464  ts, max_ts, flags);
2465 
2466  if (ret >= 0)
2468  return ret;
2469  }
2470 
2471  if (s->iformat->read_timestamp) {
2472  // try to seek via read_timestamp()
2473  }
2474 
2475  // Fall back on old API if new is not implemented but old is.
2476  // Note the old API has somewhat different semantics.
2477  if (s->iformat->read_seek || 1) {
2478  int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2479  int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2480  if (ret<0 && ts != min_ts && max_ts != ts) {
2481  ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2482  if (ret >= 0)
2483  ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2484  }
2485  return ret;
2486  }
2487 
2488  // try some generic seek like seek_frame_generic() but with new ts semantics
2489  return -1; //unreachable
2490 }
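/*
 * Usage sketch (illustrative, not part of utils.c): the newer seeking API
 * lets the caller bound the acceptable landing timestamp. With
 * stream_index == -1 the target is expressed in AV_TIME_BASE units, and
 * setting max_ts to the target while leaving min_ts open mirrors
 * AVSEEK_FLAG_BACKWARD semantics.
 */
static int example_seek_file(AVFormatContext *fmt_ctx, int64_t target_us)
{
    return avformat_seek_file(fmt_ctx, -1, INT64_MIN, target_us, target_us, 0);
}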
2491 
2492 int avformat_flush(AVFormatContext *s)
2493 {
2495  return 0;
2496 }
2497 
2498 /*******************************************************/
2499 
2500 /**
2501  * Check whether the context has an accurate duration in any of its streams.
2502  *
2503  * @return TRUE if at least one component has an accurate duration.
2504  */
2505 static int has_duration(AVFormatContext *ic)
2506 {
2507  int i;
2508  AVStream *st;
2509 
2510  for (i = 0; i < ic->nb_streams; i++) {
2511  st = ic->streams[i];
2512  if (st->duration != AV_NOPTS_VALUE)
2513  return 1;
2514  }
2515  if (ic->duration != AV_NOPTS_VALUE)
2516  return 1;
2517  return 0;
2518 }
2519 
2520 /**
2521  * Estimate the stream timings from those of the individual components.
2522  *
2523  * Also computes the global bitrate if possible.
2524  */
2525 static void update_stream_timings(AVFormatContext *ic)
2526 {
2527  int64_t start_time, start_time1, start_time_text, end_time, end_time1, end_time_text;
2528  int64_t duration, duration1, filesize;
2529  int i;
2530  AVStream *st;
2531  AVProgram *p;
2532 
2533  start_time = INT64_MAX;
2534  start_time_text = INT64_MAX;
2535  end_time = INT64_MIN;
2536  end_time_text = INT64_MIN;
2537  duration = INT64_MIN;
2538  for (i = 0; i < ic->nb_streams; i++) {
2539  st = ic->streams[i];
2540  if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2541  start_time1 = av_rescale_q(st->start_time, st->time_base,
2542  AV_TIME_BASE_Q);
2544  if (start_time1 < start_time_text)
2545  start_time_text = start_time1;
2546  } else
2547  start_time = FFMIN(start_time, start_time1);
2548  end_time1 = av_rescale_q_rnd(st->duration, st->time_base,
2551  if (end_time1 != AV_NOPTS_VALUE && (end_time1 > 0 ? start_time1 <= INT64_MAX - end_time1 : start_time1 >= INT64_MIN - end_time1)) {
2552  end_time1 += start_time1;
2554  end_time_text = FFMAX(end_time_text, end_time1);
2555  else
2556  end_time = FFMAX(end_time, end_time1);
2557  }
2558  for (p = NULL; (p = av_find_program_from_stream(ic, p, i)); ) {
2559  if (p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2560  p->start_time = start_time1;
2561  if (p->end_time < end_time1)
2562  p->end_time = end_time1;
2563  }
2564  }
2565  if (st->duration != AV_NOPTS_VALUE) {
2566  duration1 = av_rescale_q(st->duration, st->time_base,
2567  AV_TIME_BASE_Q);
2568  duration = FFMAX(duration, duration1);
2569  }
2570  }
2571  if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2572  start_time = start_time_text;
2573  else if (start_time > start_time_text)
2574  av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2575 
2576  if (end_time == INT64_MIN || (end_time < end_time_text && end_time_text - end_time < AV_TIME_BASE)) {
2577  end_time = end_time_text;
2578  } else if (end_time < end_time_text) {
2579  av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream endtime %f\n", end_time_text / (float)AV_TIME_BASE);
2580  }
2581 
2582  if (start_time != INT64_MAX) {
2583  ic->start_time = start_time;
2584  if (end_time != INT64_MIN) {
2585  if (ic->nb_programs > 1) {
2586  for (i = 0; i < ic->nb_programs; i++) {
2587  p = ic->programs[i];
2588  if (p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2589  duration = FFMAX(duration, p->end_time - p->start_time);
2590  }
2591  } else
2592  duration = FFMAX(duration, end_time - start_time);
2593  }
2594  }
2595  if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2596  ic->duration = duration;
2597  }
2598  if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration > 0) {
2599  /* compute the bitrate */
2600  double bitrate = (double) filesize * 8.0 * AV_TIME_BASE /
2601  (double) ic->duration;
2602  if (bitrate >= 0 && bitrate <= INT64_MAX)
2603  ic->bit_rate = bitrate;
2604  }
2605 }
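/*
 * Worked example (illustrative): for a 10000000-byte file whose computed
 * duration is 60 seconds (60 * AV_TIME_BASE microseconds), the global
 * bitrate estimate above is
 *     10000000 * 8 * AV_TIME_BASE / (60 * AV_TIME_BASE) = 1333333 bit/s.
 */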
2606 
2607 static void fill_all_stream_timings(AVFormatContext *ic)
2608 {
2609  int i;
2610  AVStream *st;
2611 
2613  for (i = 0; i < ic->nb_streams; i++) {
2614  st = ic->streams[i];
2615  if (st->start_time == AV_NOPTS_VALUE) {
2616  if (ic->start_time != AV_NOPTS_VALUE)
2618  st->time_base);
2619  if (ic->duration != AV_NOPTS_VALUE)
2621  st->time_base);
2622  }
2623  }
2624 }
2625 
2626 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2627 {
2628  int64_t filesize, duration;
2629  int i, show_warning = 0;
2630  AVStream *st;
2631 
2632  /* if bit_rate is already set, we believe it */
2633  if (ic->bit_rate <= 0) {
2634  int64_t bit_rate = 0;
2635  for (i = 0; i < ic->nb_streams; i++) {
2636  st = ic->streams[i];
2637  if (st->codecpar->bit_rate <= 0 && st->internal->avctx->bit_rate > 0)
2638  st->codecpar->bit_rate = st->internal->avctx->bit_rate;
2639  if (st->codecpar->bit_rate > 0) {
2640  if (INT64_MAX - st->codecpar->bit_rate < bit_rate) {
2641  bit_rate = 0;
2642  break;
2643  }
2644  bit_rate += st->codecpar->bit_rate;
2645  } else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && st->codec_info_nb_frames > 1) {
2646  // If we have a video stream with packets but without a bitrate,
2647  // then consider the sum unknown.
2648  bit_rate = 0;
2649  break;
2650  }
2651  }
2652  ic->bit_rate = bit_rate;
2653  }
2654 
2655  /* if duration is already set, we believe it */
2656  if (ic->duration == AV_NOPTS_VALUE &&
2657  ic->bit_rate != 0) {
2658  filesize = ic->pb ? avio_size(ic->pb) : 0;
2659  if (filesize > ic->internal->data_offset) {
2660  filesize -= ic->internal->data_offset;
2661  for (i = 0; i < ic->nb_streams; i++) {
2662  st = ic->streams[i];
2663  if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2664  && st->duration == AV_NOPTS_VALUE) {
2665  duration = av_rescale(8 * filesize, st->time_base.den,
2666  ic->bit_rate *
2667  (int64_t) st->time_base.num);
2668  st->duration = duration;
2669  show_warning = 1;
2670  }
2671  }
2672  }
2673  }
2674  if (show_warning)
2675  av_log(ic, AV_LOG_WARNING,
2676  "Estimating duration from bitrate, this may be inaccurate\n");
2677 }
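/*
 * Worked example (illustrative): with 5000000 payload bytes after the data
 * offset, an aggregate bit_rate of 1000000 b/s and a stream time_base of
 * 1/90000, the estimate above is
 *     duration = 8 * 5000000 * 90000 / (1000000 * 1)
 *              = 3600000 ticks, i.e. 40 seconds.
 */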
2678 
2679 #define DURATION_MAX_READ_SIZE 250000LL
2680 #define DURATION_MAX_RETRY 6
2681 
2682 /* only usable for MPEG-PS streams */
2683 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2684 {
2685  AVPacket pkt1, *pkt = &pkt1;
2686  AVStream *st;
2687  int num, den, read_size, i, ret;
2688  int found_duration = 0;
2689  int is_end;
2690  int64_t filesize, offset, duration;
2691  int retry = 0;
2692 
2693  /* flush packet queue */
2694  flush_packet_queue(ic);
2695 
2696  for (i = 0; i < ic->nb_streams; i++) {
2697  st = ic->streams[i];
2698  if (st->start_time == AV_NOPTS_VALUE &&
2699  st->first_dts == AV_NOPTS_VALUE &&
2701  av_log(ic, AV_LOG_WARNING,
2702  "start time for stream %d is not set in estimate_timings_from_pts\n", i);
2703 
2704  if (st->parser) {
2705  av_parser_close(st->parser);
2706  st->parser = NULL;
2707  }
2708  }
2709 
2710  av_opt_set(ic, "skip_changes", "1", AV_OPT_SEARCH_CHILDREN);
2711  /* estimate the end time (duration) */
2712  /* XXX: may need to support wrapping */
2713  filesize = ic->pb ? avio_size(ic->pb) : 0;
2714  do {
2715  is_end = found_duration;
2716  offset = filesize - (DURATION_MAX_READ_SIZE << retry);
2717  if (offset < 0)
2718  offset = 0;
2719 
2720  avio_seek(ic->pb, offset, SEEK_SET);
2721  read_size = 0;
2722  for (;;) {
2723  if (read_size >= DURATION_MAX_READ_SIZE << (FFMAX(retry - 1, 0)))
2724  break;
2725 
2726  do {
2727  ret = ff_read_packet(ic, pkt);
2728  } while (ret == AVERROR(EAGAIN));
2729  if (ret != 0)
2730  break;
2731  read_size += pkt->size;
2732  st = ic->streams[pkt->stream_index];
2733  if (pkt->pts != AV_NOPTS_VALUE &&
2734  (st->start_time != AV_NOPTS_VALUE ||
2735  st->first_dts != AV_NOPTS_VALUE)) {
2736  if (pkt->duration == 0) {
2737  ff_compute_frame_duration(ic, &num, &den, st, st->parser, pkt);
2738  if (den && num) {
2739  pkt->duration = av_rescale_rnd(1,
2740  num * (int64_t) st->time_base.den,
2741  den * (int64_t) st->time_base.num,
2742  AV_ROUND_DOWN);
2743  }
2744  }
2745  duration = pkt->pts + pkt->duration;
2746  found_duration = 1;
2747  if (st->start_time != AV_NOPTS_VALUE)
2748  duration -= st->start_time;
2749  else
2750  duration -= st->first_dts;
2751  if (duration > 0) {
2752  if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<= 0 ||
2753  (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2754  st->duration = duration;
2755  st->info->last_duration = duration;
2756  }
2757  }
2758  av_packet_unref(pkt);
2759  }
2760 
2761  /* check if all audio/video streams have valid duration */
2762  if (!is_end) {
2763  is_end = 1;
2764  for (i = 0; i < ic->nb_streams; i++) {
2765  st = ic->streams[i];
2766  switch (st->codecpar->codec_type) {
2767  case AVMEDIA_TYPE_VIDEO:
2768  case AVMEDIA_TYPE_AUDIO:
2769  if (st->duration == AV_NOPTS_VALUE)
2770  is_end = 0;
2771  }
2772  }
2773  }
2774  } while (!is_end &&
2775  offset &&
2776  ++retry <= DURATION_MAX_RETRY);
2777 
2778  av_opt_set(ic, "skip_changes", "0", AV_OPT_SEARCH_CHILDREN);
2779 
2780  /* warn about audio/video streams whose duration could not be estimated */
2781  for (i = 0; i < ic->nb_streams; i++) {
2782  st = ic->streams[i];
2783  if (st->duration == AV_NOPTS_VALUE) {
2784  switch (st->codecpar->codec_type) {
2785  case AVMEDIA_TYPE_VIDEO:
2786  case AVMEDIA_TYPE_AUDIO:
2787  if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
2788  av_log(ic, AV_LOG_DEBUG, "stream %d : no PTS found at end of file, duration not set\n", i);
2789  } else
2790  av_log(ic, AV_LOG_DEBUG, "stream %d : no TS found at start of file, duration not set\n", i);
2791  }
2792  }
2793  }
2795 
2796  avio_seek(ic->pb, old_offset, SEEK_SET);
2797  for (i = 0; i < ic->nb_streams; i++) {
2798  int j;
2799 
2800  st = ic->streams[i];
2801  st->cur_dts = st->first_dts;
2804  for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
2805  st->pts_buffer[j] = AV_NOPTS_VALUE;
2806  }
2807 }
2808 
2809 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2810 {
2811  int64_t file_size;
2812 
2813  /* get the file size, if possible */
2814  if (ic->iformat->flags & AVFMT_NOFILE) {
2815  file_size = 0;
2816  } else {
2817  file_size = avio_size(ic->pb);
2818  file_size = FFMAX(0, file_size);
2819  }
2820 
2821  if ((!strcmp(ic->iformat->name, "mpeg") ||
2822  !strcmp(ic->iformat->name, "mpegts")) &&
2823  file_size && ic->pb->seekable) {
2824  /* get accurate estimate from the PTSes */
2825  estimate_timings_from_pts(ic, old_offset);
2827  } else if (has_duration(ic)) {
2828  /* at least one component has timings - we use them for all
2829  * the components */
2832  } else {
2833  /* less precise: use bitrate info */
2836  }
2838 
2839  {
2840  int i;
2841  AVStream av_unused *st;
2842  for (i = 0; i < ic->nb_streams; i++) {
2843  st = ic->streams[i];
2844  av_log(ic, AV_LOG_TRACE, "stream %d: start_time: %0.3f duration: %0.3f\n", i,
2845  (double) st->start_time * av_q2d(st->time_base),
2846  (double) st->duration * av_q2d(st->time_base));
2847  }
2848  av_log(ic, AV_LOG_TRACE,
2849  "format: start_time: %0.3f duration: %0.3f bitrate=%"PRId64" kb/s\n",
2850  (double) ic->start_time / AV_TIME_BASE,
2851  (double) ic->duration / AV_TIME_BASE,
2852  (int64_t)ic->bit_rate / 1000);
2853  }
2854 }
2855 
2856 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2857 {
2858  AVCodecContext *avctx = st->internal->avctx;
2859 
2860 #define FAIL(errmsg) do { \
2861  if (errmsg_ptr) \
2862  *errmsg_ptr = errmsg; \
2863  return 0; \
2864  } while (0)
2865 
2866  if ( avctx->codec_id == AV_CODEC_ID_NONE
2867  && avctx->codec_type != AVMEDIA_TYPE_DATA)
2868  FAIL("unknown codec");
2869  switch (avctx->codec_type) {
2870  case AVMEDIA_TYPE_AUDIO:
2871  if (!avctx->frame_size && determinable_frame_size(avctx))
2872  FAIL("unspecified frame size");
2873  if (st->info->found_decoder >= 0 &&
2874  avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2875  FAIL("unspecified sample format");
2876  if (!avctx->sample_rate)
2877  FAIL("unspecified sample rate");
2878  if (!avctx->channels)
2879  FAIL("unspecified number of channels");
2880  if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2881  FAIL("no decodable DTS frames");
2882  break;
2883  case AVMEDIA_TYPE_VIDEO:
2884  if (!avctx->width)
2885  FAIL("unspecified size");
2886  if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2887  FAIL("unspecified pixel format");
2890  FAIL("no frame in rv30/40 and no sar");
2891  break;
2892  case AVMEDIA_TYPE_SUBTITLE:
2893  if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2894  FAIL("unspecified size");
2895  break;
2896  case AVMEDIA_TYPE_DATA:
2897  if (avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2898  }
2899 
2900  return 1;
2901 }
2902 
2903 /* Returns 1 if decoded data was returned, 0 if it was not, or a negative error code. */
2904 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt,
2906 {
2907  AVCodecContext *avctx = st->internal->avctx;
2908  const AVCodec *codec;
2909  int got_picture = 1, ret = 0;
2911  AVSubtitle subtitle;
2912  AVPacket pkt = *avpkt;
2913  int do_skip_frame = 0;
2914  enum AVDiscard skip_frame;
2915 
2916  if (!frame)
2917  return AVERROR(ENOMEM);
2918 
2919  if (!avcodec_is_open(avctx) &&
2920  st->info->found_decoder <= 0 &&
2921  (st->codecpar->codec_id != -st->info->found_decoder || !st->codecpar->codec_id)) {
2922  AVDictionary *thread_opt = NULL;
2923 
2924  codec = find_probe_decoder(s, st, st->codecpar->codec_id);
2925 
2926  if (!codec) {
2927  st->info->found_decoder = -st->codecpar->codec_id;
2928  ret = -1;
2929  goto fail;
2930  }
2931 
2932  /* Force thread count to 1 since the H.264 decoder will not extract
2933  * SPS and PPS to extradata during multi-threaded decoding. */
2934  av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2935  if (s->codec_whitelist)
2936  av_dict_set(options ? options : &thread_opt, "codec_whitelist", s->codec_whitelist, 0);
2937  ret = avcodec_open2(avctx, codec, options ? options : &thread_opt);
2938  if (!options)
2939  av_dict_free(&thread_opt);
2940  if (ret < 0) {
2941  st->info->found_decoder = -avctx->codec_id;
2942  goto fail;
2943  }
2944  st->info->found_decoder = 1;
2945  } else if (!st->info->found_decoder)
2946  st->info->found_decoder = 1;
2947 
2948  if (st->info->found_decoder < 0) {
2949  ret = -1;
2950  goto fail;
2951  }
2952 
2954  do_skip_frame = 1;
2955  skip_frame = avctx->skip_frame;
2956  avctx->skip_frame = AVDISCARD_ALL;
2957  }
2958 
2959  while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2960  ret >= 0 &&
2962  (!st->codec_info_nb_frames &&
2964  got_picture = 0;
2965  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2966  avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2967  ret = avcodec_send_packet(avctx, &pkt);
2968  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
2969  break;
2970  if (ret >= 0)
2971  pkt.size = 0;
2972  ret = avcodec_receive_frame(avctx, frame);
2973  if (ret >= 0)
2974  got_picture = 1;
2975  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
2976  ret = 0;
2977  } else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2978  ret = avcodec_decode_subtitle2(avctx, &subtitle,
2979  &got_picture, &pkt);
2980  if (ret >= 0)
2981  pkt.size = 0;
2982  }
2983  if (ret >= 0) {
2984  if (got_picture)
2985  st->nb_decoded_frames++;
2986  ret = got_picture;
2987  }
2988  }
2989 
2990  if (!pkt.data && !got_picture)
2991  ret = -1;
2992 
2993 fail:
2994  if (do_skip_frame) {
2995  avctx->skip_frame = skip_frame;
2996  }
2997 
2998  av_frame_free(&frame);
2999  return ret;
3000 }
3001 
3002 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
3003 {
3004  while (tags->id != AV_CODEC_ID_NONE) {
3005  if (tags->id == id)
3006  return tags->tag;
3007  tags++;
3008  }
3009  return 0;
3010 }
3011 
3012 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
3013 {
3014  int i;
3015  for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
3016  if (tag == tags[i].tag)
3017  return tags[i].id;
3018  for (i = 0; tags[i].id != AV_CODEC_ID_NONE; i++)
3019  if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
3020  return tags[i].id;
3021  return AV_CODEC_ID_NONE;
3022 }
3023 
3024 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
3025 {
3026  if (bps <= 0 || bps > 64)
3027  return AV_CODEC_ID_NONE;
3028 
3029  if (flt) {
3030  switch (bps) {
3031  case 32:
3033  case 64:
3035  default:
3036  return AV_CODEC_ID_NONE;
3037  }
3038  } else {
3039  bps += 7;
3040  bps >>= 3;
3041  if (sflags & (1 << (bps - 1))) {
3042  switch (bps) {
3043  case 1:
3044  return AV_CODEC_ID_PCM_S8;
3045  case 2:
3047  case 3:
3049  case 4:
3051  case 8:
3053  default:
3054  return AV_CODEC_ID_NONE;
3055  }
3056  } else {
3057  switch (bps) {
3058  case 1:
3059  return AV_CODEC_ID_PCM_U8;
3060  case 2:
3062  case 3:
3064  case 4:
3066  default:
3067  return AV_CODEC_ID_NONE;
3068  }
3069  }
3070  }
3071 }
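/*
 * Usage sketch (illustrative, not part of utils.c): a WAV/AIFF-style
 * demuxer that has parsed the sample width, float flag and endianness can
 * resolve the PCM codec in one call; 16-bit signed little-endian integer
 * samples, for example, resolve to AV_CODEC_ID_PCM_S16LE. Passing ~0 as
 * sflags marks every sample width as signed.
 */
static enum AVCodecID example_pcm_s16le_id(void)
{
    return ff_get_pcm_codec_id(16, 0, 0, ~0);
}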
3072 
3073 unsigned int av_codec_get_tag(const AVCodecTag *const *tags, enum AVCodecID id)
3074 {
3075  unsigned int tag;
3076  if (!av_codec_get_tag2(tags, id, &tag))
3077  return 0;
3078  return tag;
3079 }
3080 
3081 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
3082  unsigned int *tag)
3083 {
3084  int i;
3085  for (i = 0; tags && tags[i]; i++) {
3086  const AVCodecTag *codec_tags = tags[i];
3087  while (codec_tags->id != AV_CODEC_ID_NONE) {
3088  if (codec_tags->id == id) {
3089  *tag = codec_tags->tag;
3090  return 1;
3091  }
3092  codec_tags++;
3093  }
3094  }
3095  return 0;
3096 }
3097 
3098 enum AVCodecID av_codec_get_id(const AVCodecTag *const *tags, unsigned int tag)
3099 {
3100  int i;
3101  for (i = 0; tags && tags[i]; i++) {
3102  enum AVCodecID id = ff_codec_get_id(tags[i], tag);
3103  if (id != AV_CODEC_ID_NONE)
3104  return id;
3105  }
3106  return AV_CODEC_ID_NONE;
3107 }
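/*
 * Usage sketch (illustrative, not part of utils.c): given a (de)muxer's
 * tag tables, a caller can translate between container tags and
 * AVCodecIDs in both directions; whether a particular codec has a tag
 * depends entirely on the format's tables.
 */
static void example_codec_tag_lookup(const AVOutputFormat *ofmt)
{
    unsigned int tag = 0;
    if (av_codec_get_tag2(ofmt->codec_tag, AV_CODEC_ID_H264, &tag))
        av_log(NULL, AV_LOG_INFO, "H.264 stored with tag 0x%08x -> id %d\n",
               tag, av_codec_get_id(ofmt->codec_tag, tag));
}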
3108 
3109 static void compute_chapters_end(AVFormatContext *s)
3110 {
3111  unsigned int i, j;
3112  int64_t max_time = 0;
3113 
3114  if (s->duration > 0 && s->start_time < INT64_MAX - s->duration)
3115  max_time = s->duration +
3116  ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
3117 
3118  for (i = 0; i < s->nb_chapters; i++)
3119  if (s->chapters[i]->end == AV_NOPTS_VALUE) {
3120  AVChapter *ch = s->chapters[i];
3121  int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q,
3122  ch->time_base)
3123  : INT64_MAX;
3124 
3125  for (j = 0; j < s->nb_chapters; j++) {
3126  AVChapter *ch1 = s->chapters[j];
3127  int64_t next_start = av_rescale_q(ch1->start, ch1->time_base,
3128  ch->time_base);
3129  if (j != i && next_start > ch->start && next_start < end)
3130  end = next_start;
3131  }
3132  ch->end = (end == INT64_MAX) ? ch->start : end;
3133  }
3134 }
3135 
3136 static int get_std_framerate(int i)
3137 {
3138  if (i < 30*12)
3139  return (i + 1) * 1001;
3140  i -= 30*12;
3141 
3142  if (i < 30)
3143  return (i + 31) * 1001 * 12;
3144  i -= 30;
3145 
3146  if (i < 3)
3147  return ((const int[]) { 80, 120, 240})[i] * 1001 * 12;
3148 
3149  i -= 3;
3150 
3151  return ((const int[]) { 24, 30, 60, 12, 15, 48 })[i] * 1000 * 12;
3152 }
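/*
 * Worked example (illustrative): downstream code divides these values by
 * 12 * 1001, so the index space enumerates candidate rates as follows:
 *   i =   0 .. 359  ->  (i + 1) / 12 fps      (1/12 fps steps up to 30)
 *   i = 360 .. 389  ->  31 .. 60 fps          (integer rates)
 *   i = 390 .. 392  ->  80, 120, 240 fps
 *   i = 393 .. 398  ->  24000/1001, 30000/1001, 60000/1001,
 *                       12000/1001, 15000/1001, 48000/1001 fps
 *                       (the NTSC-style 23.976 / 29.97 / 59.94 family)
 */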
3153 
3154 /* Is the time base unreliable?
3155  * This is a heuristic to balance between quick acceptance of the values in
3156  * the headers vs. some extra checks.
3157  * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
3158  * MPEG-2 commonly misuses field repeat flags to store different framerates.
3159  * And there are "variable" fps files this needs to detect as well. */
3161 {
3162  if (c->time_base.den >= 101LL * c->time_base.num ||
3163  c->time_base.den < 5LL * c->time_base.num ||
3164  // c->codec_tag == AV_RL32("DIVX") ||
3165  // c->codec_tag == AV_RL32("XVID") ||
3166  c->codec_tag == AV_RL32("mp4v") ||
3168  c->codec_id == AV_CODEC_ID_GIF ||
3169  c->codec_id == AV_CODEC_ID_HEVC ||
3170  c->codec_id == AV_CODEC_ID_H264)
3171  return 1;
3172  return 0;
3173 }
3174 
3176 {
3177  int ret;
3178 
3180  par->extradata = NULL;
3181  par->extradata_size = 0;
3182  return AVERROR(EINVAL);
3183  }
3185  if (par->extradata) {
3186  memset(par->extradata + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
3187  par->extradata_size = size;
3188  ret = 0;
3189  } else {
3190  par->extradata_size = 0;
3191  ret = AVERROR(ENOMEM);
3192  }
3193  return ret;
3194 }
3195 
3196 int ff_get_extradata(AVFormatContext *s, AVCodecParameters *par, AVIOContext *pb, int size)
3197 {
3198  int ret = ff_alloc_extradata(par, size);
3199  if (ret < 0)
3200  return ret;
3201  ret = avio_read(pb, par->extradata, size);
3202  if (ret != size) {
3203  av_freep(&par->extradata);
3204  par->extradata_size = 0;
3205  av_log(s, AV_LOG_ERROR, "Failed to read extradata of size %d\n", size);
3206  return ret < 0 ? ret : AVERROR_INVALIDDATA;
3207  }
3208 
3209  return ret;
3210 }
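/*
 * Usage sketch (illustrative, not part of utils.c): a demuxer that finds
 * an extradata blob of "size" bytes at the current position of its
 * AVIOContext can attach it to the stream in one call; the helper
 * allocates size + AV_INPUT_BUFFER_PADDING_SIZE bytes, zeroes the padding
 * and fails cleanly on short reads.
 */
static int example_read_codec_config(AVFormatContext *s, AVStream *st, int size)
{
    return ff_get_extradata(s, st->codecpar, s->pb, size);
}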
3211 
3212 int ff_rfps_add_frame(AVFormatContext *ic, AVStream *st, int64_t ts)
3213 {
3214  int i, j;
3215  int64_t last = st->info->last_dts;
3216 
3217  if ( ts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && ts > last
3218  && ts - (uint64_t)last < INT64_MAX) {
3219  double dts = (is_relative(ts) ? ts - RELATIVE_TS_BASE : ts) * av_q2d(st->time_base);
3220  int64_t duration = ts - last;
3221 
3222  if (!st->info->duration_error)
3223  st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
3224  if (!st->info->duration_error)
3225  return AVERROR(ENOMEM);
3226 
3227 // if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
3228 // av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
3229  for (i = 0; i<MAX_STD_TIMEBASES; i++) {
3230  if (st->info->duration_error[0][1][i] < 1e10) {
3231  int framerate = get_std_framerate(i);
3232  double sdts = dts*framerate/(1001*12);
3233  for (j= 0; j<2; j++) {
3234  int64_t ticks = llrint(sdts+j*0.5);
3235  double error= sdts - ticks + j*0.5;
3236  st->info->duration_error[j][0][i] += error;
3237  st->info->duration_error[j][1][i] += error*error;
3238  }
3239  }
3240  }
3241  st->info->duration_count++;
3243 
3244  if (st->info->duration_count % 10 == 0) {
3245  int n = st->info->duration_count;
3246  for (i = 0; i<MAX_STD_TIMEBASES; i++) {
3247  if (st->info->duration_error[0][1][i] < 1e10) {
3248  double a0 = st->info->duration_error[0][0][i] / n;
3249  double error0 = st->info->duration_error[0][1][i] / n - a0*a0;
3250  double a1 = st->info->duration_error[1][0][i] / n;
3251  double error1 = st->info->duration_error[1][1][i] / n - a1*a1;
3252  if (error0 > 0.04 && error1 > 0.04) {
3253  st->info->duration_error[0][1][i] = 2e10;
3254  st->info->duration_error[1][1][i] = 2e10;
3255  }
3256  }
3257  }
3258  }
3259 
3260  // ignore the first 4 values, they might have some random jitter
3261  if (st->info->duration_count > 3 && is_relative(ts) == is_relative(last))
3262  st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
3263  }
3264  if (ts != AV_NOPTS_VALUE)
3265  st->info->last_dts = ts;
3266 
3267  return 0;
3268 }
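/*
 * Illustrative summary of the statistics gathered above: for each
 * candidate rate r(i) the timestamp is converted to frame units,
 *     sdts = dts * r(i) / (1001 * 12),
 * and its deviation from the nearest integer (j == 0) or half-integer
 * (j == 1) grid is accumulated as a running sum and sum of squares.
 * Every 10 samples the variance E[e^2] - (E[e])^2 is checked, and
 * candidates whose variance exceeds 0.04 on both grids are disqualified
 * by forcing the stored sum of squares to 2e10; ff_rfps_calculate()
 * later picks the surviving candidate with the smallest variance.
 */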
3269 
3270 void ff_rfps_calculate(AVFormatContext *ic)
3271 {
3272  int i, j;
3273 
3274  for (i = 0; i < ic->nb_streams; i++) {
3275  AVStream *st = ic->streams[i];
3276 
3278  continue;
3279  // the check for tb_unreliable() is not completely correct, since this is not about handling
3280  // an unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3281  // ipmovie.c produces.
3282  if (tb_unreliable(st->internal->avctx) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3283  av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3284  if (st->info->duration_count>1 && !st->r_frame_rate.num
3285  && tb_unreliable(st->internal->avctx)) {
3286  int num = 0;
3287  double best_error= 0.01;
3288  AVRational ref_rate = st->r_frame_rate.num ? st->r_frame_rate : av_inv_q(st->time_base);
3289 
3290  for (j= 0; j<MAX_STD_TIMEBASES; j++) {
3291  int k;
3292 
3293  if (st->info->codec_info_duration &&
3294  st->info->codec_info_duration*av_q2d(st->time_base) < (1001*11.5)/get_std_framerate(j))
3295  continue;
3296  if (!st->info->codec_info_duration && get_std_framerate(j) < 1001*12)
3297  continue;
3298 
3299  if (av_q2d(st->time_base) * st->info->rfps_duration_sum / st->info->duration_count < (1001*12.0 * 0.8)/get_std_framerate(j))
3300  continue;
3301 
3302  for (k= 0; k<2; k++) {
3303  int n = st->info->duration_count;
3304  double a= st->info->duration_error[k][0][j] / n;
3305  double error= st->info->duration_error[k][1][j]/n - a*a;
3306 
3307  if (error < best_error && best_error> 0.000000001) {
3308  best_error= error;
3309  num = get_std_framerate(j);
3310  }
3311  if (error < 0.02)
3312  av_log(ic, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3313  }
3314  }
3315  // do not increase frame rate by more than 1 % in order to match a standard rate.
3316  if (num && (!ref_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(ref_rate)))
3317  av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3318  }
3319  if ( !st->avg_frame_rate.num
3320  && st->r_frame_rate.num && st->info->rfps_duration_sum
3321  && st->info->codec_info_duration <= 0
3322  && st->info->duration_count > 2
3323  && fabs(1.0 / (av_q2d(st->r_frame_rate) * av_q2d(st->time_base)) - st->info->rfps_duration_sum / (double)st->info->duration_count) <= 1.0
3324  ) {
3325  av_log(ic, AV_LOG_DEBUG, "Setting avg frame rate based on r frame rate\n");
3326  st->avg_frame_rate = st->r_frame_rate;
3327  }
3328 
3329  av_freep(&st->info->duration_error);
3330  st->info->last_dts = AV_NOPTS_VALUE;
3331  st->info->duration_count = 0;
3332  st->info->rfps_duration_sum = 0;
3333  }
3334 }
3335 
3337 {
3338  int i, count = 0, ret = 0, j;
3339  int64_t read_size;
3340  AVStream *st;
3341  AVCodecContext *avctx;
3342  AVPacket pkt1, *pkt;
3343  int64_t old_offset = avio_tell(ic->pb);
3344  // new streams might appear, no options for those
3345  int orig_nb_streams = ic->nb_streams;
3346  int flush_codecs;
3347  int64_t max_analyze_duration = ic->max_analyze_duration;
3348  int64_t max_stream_analyze_duration;
3349  int64_t max_subtitle_analyze_duration;
3350  int64_t probesize = ic->probesize;
3351  int eof_reached = 0;
3352 
3353  flush_codecs = probesize > 0;
3354 
3355  av_opt_set(ic, "skip_clear", "1", AV_OPT_SEARCH_CHILDREN);
3356 
3357  max_stream_analyze_duration = max_analyze_duration;
3358  max_subtitle_analyze_duration = max_analyze_duration;
3359  if (!max_analyze_duration) {
3360  max_stream_analyze_duration =
3361  max_analyze_duration = 5*AV_TIME_BASE;
3362  max_subtitle_analyze_duration = 30*AV_TIME_BASE;
3363  if (!strcmp(ic->iformat->name, "flv"))
3364  max_stream_analyze_duration = 90*AV_TIME_BASE;
3365  if (!strcmp(ic->iformat->name, "mpeg") || !strcmp(ic->iformat->name, "mpegts"))
3366  max_stream_analyze_duration = 7*AV_TIME_BASE;
3367  }
3368 
3369  if (ic->pb)
3370  av_log(ic, AV_LOG_DEBUG, "Before avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d nb_streams:%d\n",
3371  avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, ic->nb_streams);
3372 
3373  for (i = 0; i < ic->nb_streams; i++) {
3374  const AVCodec *codec;
3375  AVDictionary *thread_opt = NULL;
3376  st = ic->streams[i];
3377  avctx = st->internal->avctx;
3378 
3379  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
3381 /* if (!st->time_base.num)
3382  st->time_base = */
3383  if (!avctx->time_base.num)
3384  avctx->time_base = st->time_base;
3385  }
3386 
3387  /* check if the caller has overridden the codec id */
3388 #if FF_API_LAVF_AVCTX
3390  if (st->codec->codec_id != st->internal->orig_codec_id) {
3391  st->codecpar->codec_id = st->codec->codec_id;
3392  st->codecpar->codec_type = st->codec->codec_type;
3393  st->internal->orig_codec_id = st->codec->codec_id;
3394  }
3396 #endif
3397  // only for the split stuff
3398  if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE) && st->request_probe <= 0) {
3399  st->parser = av_parser_init(st->codecpar->codec_id);
3400  if (st->parser) {
3401  if (st->need_parsing == AVSTREAM_PARSE_HEADERS) {
3403  } else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
3405  }
3406  } else if (st->need_parsing) {
3407  av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
3408  "%s, packets or times may be invalid.\n",
3410  }
3411  }
3412 
3413  if (st->codecpar->codec_id != st->internal->orig_codec_id)
3415 
3416  ret = avcodec_parameters_to_context(avctx, st->codecpar);
3417  if (ret < 0)
3418  goto find_stream_info_err;
3419  if (st->request_probe <= 0)
3420  st->internal->avctx_inited = 1;
3421 
3422  codec = find_probe_decoder(ic, st, st->codecpar->codec_id);
3423 
3424  /* Force thread count to 1 since the H.264 decoder will not extract
3425  * SPS and PPS to extradata during multi-threaded decoding. */
3426  av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
3427 
3428  if (ic->codec_whitelist)
3429  av_dict_set(options ? &options[i] : &thread_opt, "codec_whitelist", ic->codec_whitelist, 0);
3430 
3431  /* Ensure that subtitle_header is properly set. */
3433  && codec && !avctx->codec) {
3434  if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0)
3435  av_log(ic, AV_LOG_WARNING,
3436  "Failed to open codec in av_find_stream_info\n");
3437  }
3438 
3439  // Try to just open decoders, in case this is enough to get parameters.
3440  if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
3441  if (codec && !avctx->codec)
3442  if (avcodec_open2(avctx, codec, options ? &options[i] : &thread_opt) < 0)
3443  av_log(ic, AV_LOG_WARNING,
3444  "Failed to open codec in av_find_stream_info\n");
3445  }
3446  if (!options)
3447  av_dict_free(&thread_opt);
3448  }
3449 
3450  for (i = 0; i < ic->nb_streams; i++) {
3451 #if FF_API_R_FRAME_RATE
3452  ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
3453 #endif
3456  }
3457 
3458  read_size = 0;
3459  for (;;) {
3460  int analyzed_all_streams;
3462  ret = AVERROR_EXIT;
3463  av_log(ic, AV_LOG_DEBUG, "interrupted\n");
3464  break;
3465  }
3466 
3467  /* check if one codec still needs to be handled */
3468  for (i = 0; i < ic->nb_streams; i++) {
3469  int fps_analyze_framecount = 20;
3470 
3471  st = ic->streams[i];
3472  if (!has_codec_parameters(st, NULL))
3473  break;
3474  /* If the timebase is coarse (like the usual millisecond precision
3475  * of mkv), we need to analyze more frames to reliably arrive at
3476  * the correct fps. */
3477  if (av_q2d(st->time_base) > 0.0005)
3478  fps_analyze_framecount *= 2;
3479  if (!tb_unreliable(st->internal->avctx))
3480  fps_analyze_framecount = 0;
3481  if (ic->fps_probe_size >= 0)
3482  fps_analyze_framecount = ic->fps_probe_size;
3484  fps_analyze_framecount = 0;
3485  /* variable fps and no guess at the real fps */
3486  if (!(st->r_frame_rate.num && st->avg_frame_rate.num) &&
3488  int count = (ic->iformat->flags & AVFMT_NOTIMESTAMPS) ?
3490  st->info->duration_count;
3491  if (count < fps_analyze_framecount)
3492  break;
3493  }
3494  if (st->parser && st->parser->parser->split &&
3495  !st->internal->avctx->extradata)
3496  break;
3497  if (st->first_dts == AV_NOPTS_VALUE &&
3498  !(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
3502  break;
3503  }
3504  analyzed_all_streams = 0;
3505  if (i == ic->nb_streams) {
3506  analyzed_all_streams = 1;
3507  /* NOTE: If the format has no header, then we need to read some
3508  * packets to get most of the streams, so we cannot stop here. */
3509  if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
3510  /* If we found the info for all the codecs, we can stop. */
3511  ret = count;
3512  av_log(ic, AV_LOG_DEBUG, "All info found\n");
3513  flush_codecs = 0;
3514  break;
3515  }
3516  }
3517  /* We did not get all the codec info, but we read too much data. */
3518  if (read_size >= probesize) {
3519  ret = count;
3520  av_log(ic, AV_LOG_DEBUG,
3521  "Probe buffer size limit of %"PRId64" bytes reached\n", probesize);
3522  for (i = 0; i < ic->nb_streams; i++)
3523  if (!ic->streams[i]->r_frame_rate.num &&
3524  ic->streams[i]->info->duration_count <= 1 &&
3526  strcmp(ic->iformat->name, "image2"))
3527  av_log(ic, AV_LOG_WARNING,
3528  "Stream #%d: not enough frames to estimate rate; "
3529  "consider increasing probesize\n", i);
3530  break;
3531  }
3532 
3533  /* NOTE: A new stream can be added here if the file has no header
3534  * (AVFMTCTX_NOHEADER). */
3535  ret = read_frame_internal(ic, &pkt1);
3536  if (ret == AVERROR(EAGAIN))
3537  continue;
3538 
3539  if (ret < 0) {
3540  /* EOF or error*/
3541  eof_reached = 1;
3542  break;
3543  }
3544 
3545  pkt = &pkt1;
3546 
3547  if (!(ic->flags & AVFMT_FLAG_NOBUFFER)) {
3548  ret = add_to_pktbuf(&ic->internal->packet_buffer, pkt,
3549  &ic->internal->packet_buffer_end, 0);
3550  if (ret < 0)
3551  goto find_stream_info_err;
3552  }
3553 
3554  st = ic->streams[pkt->stream_index];
3556  read_size += pkt->size;
3557 
3558  avctx = st->internal->avctx;
3559  if (!st->internal->avctx_inited) {
3560  ret = avcodec_parameters_to_context(avctx, st->codecpar);
3561  if (ret < 0)
3562  goto find_stream_info_err;
3563  st->internal->avctx_inited = 1;
3564  }
3565 
3566  if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
3567  /* check for non-increasing dts */
3568  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3569  st->info->fps_last_dts >= pkt->dts) {
3570  av_log(ic, AV_LOG_DEBUG,
3571  "Non-increasing DTS in stream %d: packet %d with DTS "
3572  "%"PRId64", packet %d with DTS %"PRId64"\n",
3573  st->index, st->info->fps_last_dts_idx,
3575  pkt->dts);
3576  st->info->fps_first_dts =
3578  }
3579  /* Check for a discontinuity in dts. If the difference in dts
3580  * is more than 1000 times the average packet duration in the
3581  * sequence, we treat it as a discontinuity. */
3582  if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
3584  (pkt->dts - st->info->fps_last_dts) / 1000 >
3585  (st->info->fps_last_dts - st->info->fps_first_dts) /
3586  (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
3587  av_log(ic, AV_LOG_WARNING,
3588  "DTS discontinuity in stream %d: packet %d with DTS "
3589  "%"PRId64", packet %d with DTS %"PRId64"\n",
3590  st->index, st->info->fps_last_dts_idx,
3592  pkt->dts);
3593  st->info->fps_first_dts =
3595  }
3596 
3597  /* update stored dts values */
3598  if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
3599  st->info->fps_first_dts = pkt->dts;
3601  }
3602  st->info->fps_last_dts = pkt->dts;
3604  }
3605  if (st->codec_info_nb_frames>1) {
3606  int64_t t = 0;
3607  int64_t limit;
3608 
3609  if (st->time_base.den > 0)
3611  if (st->avg_frame_rate.num > 0)
3613 
3614  if ( t == 0
3615  && st->codec_info_nb_frames>30
3616  && st->info->fps_first_dts != AV_NOPTS_VALUE
3617  && st->info->fps_last_dts != AV_NOPTS_VALUE)
3619 
3620  if (analyzed_all_streams) limit = max_analyze_duration;
3621  else if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) limit = max_subtitle_analyze_duration;
3622  else limit = max_stream_analyze_duration;
3623 
3624  if (t >= limit) {
3625  av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %"PRId64" reached at %"PRId64" microseconds st:%d\n",
3626  limit,
3627  t, pkt->stream_index);
3628  if (ic->flags & AVFMT_FLAG_NOBUFFER)
3629  av_packet_unref(pkt);
3630  break;
3631  }
3632  if (pkt->duration) {
3633  if (avctx->codec_type == AVMEDIA_TYPE_SUBTITLE && pkt->pts != AV_NOPTS_VALUE && pkt->pts >= st->start_time) {
3634  st->info->codec_info_duration = FFMIN(pkt->pts - st->start_time, st->info->codec_info_duration + pkt->duration);
3635  } else
3636  st->info->codec_info_duration += pkt->duration;
3637  st->info->codec_info_duration_fields += st->parser && st->need_parsing && avctx->ticks_per_frame ==2 ? st->parser->repeat_pict + 1 : 2;
3638  }
3639  }
3640 #if FF_API_R_FRAME_RATE
3642  ff_rfps_add_frame(ic, st, pkt->dts);
3643 #endif
3644  if (st->parser && st->parser->parser->split && !avctx->extradata) {
3645  int i = st->parser->parser->split(avctx, pkt->data, pkt->size);
3646  if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
3647  avctx->extradata_size = i;
3648  avctx->extradata = av_mallocz(avctx->extradata_size +
3650  if (!avctx->extradata)
3651  return AVERROR(ENOMEM);
3652  memcpy(avctx->extradata, pkt->data,
3653  avctx->extradata_size);
3654  }
3655  }
3656 
3657  /* If still no information, we try to open the codec and to
3658  * decompress the frame. We try to avoid that in most cases as
3659  * it takes longer and uses more memory. For MPEG-4, we need to
3660  * decompress for QuickTime.
3661  *
3662  * If AV_CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
3663  * least one frame of codec data; this makes sure the codec initializes
3664  * the channel configuration and does not only trust the values from
3665  * the container. */
3666  try_decode_frame(ic, st, pkt,
3667  (options && i < orig_nb_streams) ? &options[i] : NULL);
3668 
3669  if (ic->flags & AVFMT_FLAG_NOBUFFER)
3670  av_packet_unref(pkt);
3671 
3672  st->codec_info_nb_frames++;
3673  count++;
3674  }
3675 
3676  if (eof_reached) {
3677  int stream_index;
3678  for (stream_index = 0; stream_index < ic->nb_streams; stream_index++) {
3679  st = ic->streams[stream_index];
3680  avctx = st->internal->avctx;
3681  if (!has_codec_parameters(st, NULL)) {
3682  const AVCodec *codec = find_probe_decoder(ic, st, st->codecpar->codec_id);
3683  if (codec && !avctx->codec) {
3684  if (avcodec_open2(avctx, codec, (options && stream_index < orig_nb_streams) ? &options[stream_index] : NULL) < 0)
3685  av_log(ic, AV_LOG_WARNING,
3686  "Failed to open codec in av_find_stream_info\n");
3687  }
3688  }
3689 
3690  // EOF already reached while reading the stream above.
3691  // So continue with reordering DTS using whatever delay we have.
3693  update_dts_from_pts(ic, stream_index, ic->internal->packet_buffer);
3694  }
3695  }
3696  }
3697 
3698  if (flush_codecs) {
3699  AVPacket empty_pkt = { 0 };
3700  int err = 0;
3701  av_init_packet(&empty_pkt);
3702 
3703  for (i = 0; i < ic->nb_streams; i++) {
3704 
3705  st = ic->streams[i];
3706 
3707  /* flush the decoders */
3708  if (st->info->found_decoder == 1) {
3709  do {
3710  err = try_decode_frame(ic, st, &empty_pkt,
3711  (options && i < orig_nb_streams)
3712  ? &options[i] : NULL);
3713  } while (err > 0 && !has_codec_parameters(st, NULL));
3714 
3715  if (err < 0) {
3716  av_log(ic, AV_LOG_INFO,
3717  "decoding for stream %d failed\n", st->index);
3718  }
3719  }
3720  }
3721  }
3722 
3723  // close codecs which were opened in try_decode_frame()
3724  for (i = 0; i < ic->nb_streams; i++) {
3725  st = ic->streams[i];
3727  }
3728 
3729  ff_rfps_calculate(ic);
3730 
3731  for (i = 0; i < ic->nb_streams; i++) {
3732  st = ic->streams[i];
3733  avctx = st->internal->avctx;
3734  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
3735  if (avctx->codec_id == AV_CODEC_ID_RAWVIDEO && !avctx->codec_tag && !avctx->bits_per_coded_sample) {
3736  uint32_t tag= avcodec_pix_fmt_to_codec_tag(avctx->pix_fmt);
3738  avctx->codec_tag= tag;
3739  }
3740 
3741  /* estimate average framerate if not set by demuxer */
3742  if (st->info->codec_info_duration_fields &&
3743  !st->avg_frame_rate.num &&
3744  st->info->codec_info_duration) {
3745  int best_fps = 0;
3746  double best_error = 0.01;
3747 
3748  if (st->info->codec_info_duration >= INT64_MAX / st->time_base.num / 2||
3749  st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3750  st->info->codec_info_duration < 0)
3751  continue;
3753  st->info->codec_info_duration_fields * (int64_t) st->time_base.den,
3754  st->info->codec_info_duration * 2 * (int64_t) st->time_base.num, 60000);
3755 
3756  /* Round guessed framerate to a "standard" framerate if it's
3757  * within 1% of the original estimate. */
3758  for (j = 0; j < MAX_STD_TIMEBASES; j++) {
3759  AVRational std_fps = { get_std_framerate(j), 12 * 1001 };
3760  double error = fabs(av_q2d(st->avg_frame_rate) /
3761  av_q2d(std_fps) - 1);
3762 
3763  if (error < best_error) {
3764  best_error = error;
3765  best_fps = std_fps.num;
3766  }
3767  }
3768  if (best_fps)
3770  best_fps, 12 * 1001, INT_MAX);
3771  }
3772 
3773  if (!st->r_frame_rate.num) {
3774  if ( avctx->time_base.den * (int64_t) st->time_base.num
3775  <= avctx->time_base.num * avctx->ticks_per_frame * (int64_t) st->time_base.den) {
3776  st->r_frame_rate.num = avctx->time_base.den;
3777  st->r_frame_rate.den = avctx->time_base.num * avctx->ticks_per_frame;
3778  } else {
3779  st->r_frame_rate.num = st->time_base.den;
3780  st->r_frame_rate.den = st->time_base.num;
3781  }
3782  }
3784  AVRational hw_ratio = { avctx->height, avctx->width };
3786  hw_ratio);
3787  }
3788  } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
3789  if (!avctx->bits_per_coded_sample)
3790  avctx->bits_per_coded_sample =
3792  // set stream disposition based on audio service type
3793  switch (avctx->audio_service_type) {
3796  break;
3799  break;
3802  break;
3805  break;
3808  break;
3809  }
3810  }
3811  }
3812 
3813  if (probesize)
3814  estimate_timings(ic, old_offset);
3815 
3816  av_opt_set(ic, "skip_clear", "0", AV_OPT_SEARCH_CHILDREN);
3817 
3818  if (ret >= 0 && ic->nb_streams)
3819  /* We could not have all the codec parameters before EOF. */
3820  ret = -1;
3821  for (i = 0; i < ic->nb_streams; i++) {
3822  const char *errmsg;
3823  st = ic->streams[i];
3824 
3825  /* if no packet was ever seen, update context now for has_codec_parameters */
3826  if (!st->internal->avctx_inited) {
3827  if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
3829  st->codecpar->format = st->internal->avctx->sample_fmt;
3831  if (ret < 0)
3832  goto find_stream_info_err;
3833  }
3834  if (!has_codec_parameters(st, &errmsg)) {
3835  char buf[256];
3836  avcodec_string(buf, sizeof(buf), st->internal->avctx, 0);
3837  av_log(ic, AV_LOG_WARNING,
3838  "Could not find codec parameters for stream %d (%s): %s\n"
3839  "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3840  i, buf, errmsg);
3841  } else {
3842  ret = 0;
3843  }
3844  }
3845 
3847 
3848  /* update the stream parameters from the internal codec contexts */
3849  for (i = 0; i < ic->nb_streams; i++) {
3850  st = ic->streams[i];
3851 
3852  if (st->internal->avctx_inited) {
3853  int orig_w = st->codecpar->width;
3854  int orig_h = st->codecpar->height;
3856  if (ret < 0)
3857  goto find_stream_info_err;
3858  // The decoder might reduce the video size by the lowres factor.
3859  if (av_codec_get_lowres(st->internal->avctx) && orig_w) {
3860  st->codecpar->width = orig_w;
3861  st->codecpar->height = orig_h;
3862  }
3863  }
3864 
3865 #if FF_API_LAVF_AVCTX
3867  ret = avcodec_parameters_to_context(st->codec, st->codecpar);
3868  if (ret < 0)
3869  goto find_stream_info_err;
3870 
3871  // The old API (AVStream.codec) "requires" the resolution to be adjusted
3872  // by the lowres factor.
3873  if (av_codec_get_lowres(st->internal->avctx) && st->internal->avctx->width) {
3875  st->codec->width = st->internal->avctx->width;
3876  st->codec->height = st->internal->avctx->height;
3877  }
3878 
3879  if (st->codec->codec_tag != MKTAG('t','m','c','d')) {
3880  st->codec->time_base = st->internal->avctx->time_base;
3881  st->codec->ticks_per_frame = st->internal->avctx->ticks_per_frame;
3882  }
3883  st->codec->framerate = st->avg_frame_rate;
3884 
3885  if (st->internal->avctx->subtitle_header) {
3886  st->codec->subtitle_header = av_malloc(st->internal->avctx->subtitle_header_size);
3887  if (!st->codec->subtitle_header)
3888  goto find_stream_info_err;
3889  st->codec->subtitle_header_size = st->internal->avctx->subtitle_header_size;
3890  memcpy(st->codec->subtitle_header, st->internal->avctx->subtitle_header,
3891  st->codec->subtitle_header_size);
3892  }
3893 
3894  // Fields unavailable in AVCodecParameters
3895  st->codec->coded_width = st->internal->avctx->coded_width;
3896  st->codec->coded_height = st->internal->avctx->coded_height;
3897  st->codec->properties = st->internal->avctx->properties;
3899 #endif
3900 
3901  st->internal->avctx_inited = 0;
3902  }
3903 
3904 find_stream_info_err:
3905  for (i = 0; i < ic->nb_streams; i++) {
3906  st = ic->streams[i];
3907  if (st->info)
3908  av_freep(&st->info->duration_error);
3909  av_freep(&ic->streams[i]->info);
3910  }
3911  if (ic->pb)
3912  av_log(ic, AV_LOG_DEBUG, "After avformat_find_stream_info() pos: %"PRId64" bytes read:%"PRId64" seeks:%d frames:%d\n",
3913  avio_tell(ic->pb), ic->pb->bytes_read, ic->pb->seek_count, count);
3914  return ret;
3915 }
3916 
3917 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3918 {
3919  int i, j;
3920 
3921  for (i = 0; i < ic->nb_programs; i++) {
3922  if (ic->programs[i] == last) {
3923  last = NULL;
3924  } else {
3925  if (!last)
3926  for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3927  if (ic->programs[i]->stream_index[j] == s)
3928  return ic->programs[i];
3929  }
3930  }
3931  return NULL;
3932 }
3933 
3934 int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type,
3935  int wanted_stream_nb, int related_stream,
3936  AVCodec **decoder_ret, int flags)
3937 {
3938  int i, nb_streams = ic->nb_streams;
3939  int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3940  unsigned *program = NULL;
3941  const AVCodec *decoder = NULL, *best_decoder = NULL;
3942 
3943  if (related_stream >= 0 && wanted_stream_nb < 0) {
3944  AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3945  if (p) {
3946  program = p->stream_index;
3947  nb_streams = p->nb_stream_indexes;
3948  }
3949  }
3950  for (i = 0; i < nb_streams; i++) {
3951  int real_stream_index = program ? program[i] : i;
3952  AVStream *st = ic->streams[real_stream_index];
3953  AVCodecParameters *par = st->codecpar;
3954  if (par->codec_type != type)
3955  continue;
3956  if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3957  continue;
3958  if (wanted_stream_nb != real_stream_index &&
3961  continue;
3962  if (type == AVMEDIA_TYPE_AUDIO && !(par->channels && par->sample_rate))
3963  continue;
3964  if (decoder_ret) {
3965  decoder = find_decoder(ic, st, par->codec_id);
3966  if (!decoder) {
3967  if (ret < 0)
3969  continue;
3970  }
3971  }
3973  bitrate = par->bit_rate;
3974  multiframe = FFMIN(5, count);
3975  if ((best_multiframe > multiframe) ||
3976  (best_multiframe == multiframe && best_bitrate > bitrate) ||
3977  (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3978  continue;
3979  best_count = count;
3980  best_bitrate = bitrate;
3981  best_multiframe = multiframe;
3982  ret = real_stream_index;
3983  best_decoder = decoder;
3984  if (program && i == nb_streams - 1 && ret < 0) {
3985  program = NULL;
3986  nb_streams = ic->nb_streams;
3987  /* no related stream found, try again with everything */
3988  i = 0;
3989  }
3990  }
3991  if (decoder_ret)
3992  *decoder_ret = (AVCodec*)best_decoder;
3993  return ret;
3994 }
3995 
3996 /*******************************************************/
3997 
3998 int av_read_play(AVFormatContext *s)
3999 {
4000  if (s->iformat->read_play)
4001  return s->iformat->read_play(s);
4002  if (s->pb)
4003  return avio_pause(s->pb, 0);
4004  return AVERROR(ENOSYS);
4005 }
4006 
4007 int av_read_pause(AVFormatContext *s)
4008 {
4009  if (s->iformat->read_pause)
4010  return s->iformat->read_pause(s);
4011  if (s->pb)
4012  return avio_pause(s->pb, 1);
4013  return AVERROR(ENOSYS);
4014 }
4015 
4016 int ff_stream_encode_params_copy(AVStream *dst, const AVStream *src)
4017 {
4018  int ret, i;
4019 
4020  dst->id = src->id;
4021  dst->time_base = src->time_base;
4022  dst->nb_frames = src->nb_frames;
4023  dst->disposition = src->disposition;
4025  dst->avg_frame_rate = src->avg_frame_rate;
4026  dst->r_frame_rate = src->r_frame_rate;
4027 
4028  av_dict_free(&dst->metadata);
4029  ret = av_dict_copy(&dst->metadata, src->metadata, 0);
4030  if (ret < 0)
4031  return ret;
4032 
4033  ret = avcodec_parameters_copy(dst->codecpar, src->codecpar);
4034  if (ret < 0)
4035  return ret;
4036 
4037  /* Free existing side data*/
4038  for (i = 0; i < dst->nb_side_data; i++)
4039  av_free(dst->side_data[i].data);
4040  av_freep(&dst->side_data);
4041  dst->nb_side_data = 0;
4042 
4043  /* Copy side data if present */
4044  if (src->nb_side_data) {
4046  sizeof(AVPacketSideData));
4047  if (!dst->side_data)
4048  return AVERROR(ENOMEM);
4049  dst->nb_side_data = src->nb_side_data;
4050 
4051  for (i = 0; i < src->nb_side_data; i++) {
4052  uint8_t *data = av_memdup(src->side_data[i].data,
4053  src->side_data[i].size);
4054  if (!data)
4055  return AVERROR(ENOMEM);
4056  dst->side_data[i].type = src->side_data[i].type;
4057  dst->side_data[i].size = src->side_data[i].size;
4058  dst->side_data[i].data = data;
4059  }
4060  }
4061 
4064  const char *conf_str = src->recommended_encoder_configuration;
4067  return AVERROR(ENOMEM);
4068  }
4069 
4070  return 0;
4071 }
4072 
4073 static void free_stream(AVStream **pst)
4074 {
4075  AVStream *st = *pst;
4076  int i;
4077 
4078  if (!st)
4079  return;
4080 
4081  for (i = 0; i < st->nb_side_data; i++)
4082  av_freep(&st->side_data[i].data);
4083  av_freep(&st->side_data);
4084 
4085  if (st->parser)
4086  av_parser_close(st->parser);
4087 
4088  if (st->attached_pic.data)
4090 
4091  if (st->internal) {
4093  for (i = 0; i < st->internal->nb_bsfcs; i++) {
4094  av_bsf_free(&st->internal->bsfcs[i]);
4095  av_freep(&st->internal->bsfcs);
4096  }
4097  }
4098  av_freep(&st->internal);
4099 
4100  av_dict_free(&st->metadata);
4102  av_freep(&st->probe_data.buf);
4103  av_freep(&st->index_entries);
4104 #if FF_API_LAVF_AVCTX
4106  av_freep(&st->codec->extradata);
4107  av_freep(&st->codec->subtitle_header);
4108  av_freep(&st->codec);
4110 #endif
4111  av_freep(&st->priv_data);
4112  if (st->info)
4113  av_freep(&st->info->duration_error);
4114  av_freep(&st->info);
4116  av_freep(&st->priv_pts);
4117 
4118  av_freep(pst);
4119 }
4120 
4121 void ff_free_stream(AVFormatContext *s, AVStream *st)
4122 {
4123  av_assert0(s->nb_streams>0);
4124  av_assert0(s->streams[ s->nb_streams - 1 ] == st);
4125 
4126  free_stream(&s->streams[ --s->nb_streams ]);
4127 }
4128 
4129 void avformat_free_context(AVFormatContext *s)
4130 {
4131  int i;
4132 
4133  if (!s)
4134  return;
4135 
4136  av_opt_free(s);
4137  if (s->iformat && s->iformat->priv_class && s->priv_data)
4138  av_opt_free(s->priv_data);
4139  if (s->oformat && s->oformat->priv_class && s->priv_data)
4140  av_opt_free(s->priv_data);
4141 
4142  for (i = s->nb_streams - 1; i >= 0; i--)
4143  ff_free_stream(s, s->streams[i]);
4144 
4145 
4146  for (i = s->nb_programs - 1; i >= 0; i--) {
4147  av_dict_free(&s->programs[i]->metadata);
4148  av_freep(&s->programs[i]->stream_index);
4149  av_freep(&s->programs[i]);
4150  }
4151  av_freep(&s->programs);
4152  av_freep(&s->priv_data);
4153  while (s->nb_chapters--) {
4155  av_freep(&s->chapters[s->nb_chapters]);
4156  }
4157  av_freep(&s->chapters);
4158  av_dict_free(&s->metadata);
4159  av_freep(&s->streams);
4160  av_freep(&s->internal);
4161  flush_packet_queue(s);
4162  av_free(s);
4163 }
4164 
4165 void avformat_close_input(AVFormatContext **ps)
4166 {
4167  AVFormatContext *s;
4168  AVIOContext *pb;
4169 
4170  if (!ps || !*ps)
4171  return;
4172 
4173  s = *ps;
4174  pb = s->pb;
4175 
4176  if ((s->iformat && strcmp(s->iformat->name, "image2") && s->iformat->flags & AVFMT_NOFILE) ||
4177  (s->flags & AVFMT_FLAG_CUSTOM_IO))
4178  pb = NULL;
4179 
4180  flush_packet_queue(s);
4181 
4182  if (s->iformat)
4183  if (s->iformat->read_close)
4184  s->iformat->read_close(s);
4185 
4187 
4188  *ps = NULL;
4189 
4190  avio_close(pb);
4191 }
4192 
4193 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
4194 {
4195  AVStream *st;
4196  int i;
4197  AVStream **streams;
4198 
4199  if (s->nb_streams >= INT_MAX/sizeof(*streams))
4200  return NULL;
4201  streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
4202  if (!streams)
4203  return NULL;
4204  s->streams = streams;
4205 
4206  st = av_mallocz(sizeof(AVStream));
4207  if (!st)
4208  return NULL;
4209  if (!(st->info = av_mallocz(sizeof(*st->info)))) {
4210  av_free(st);
4211  return NULL;
4212  }
4213  st->info->last_dts = AV_NOPTS_VALUE;
4214 
4215 #if FF_API_LAVF_AVCTX
4217  st->codec = avcodec_alloc_context3(c);
4218  if (!st->codec) {
4219  av_free(st->info);
4220  av_free(st);
4221  return NULL;
4222  }
4224 #endif
4225 
4226  st->internal = av_mallocz(sizeof(*st->internal));
4227  if (!st->internal)
4228  goto fail;
4229 
4231  if (!st->codecpar)
4232  goto fail;
4233 
4235  if (!st->internal->avctx)
4236  goto fail;
4237 
4238  if (s->iformat) {
4239 #if FF_API_LAVF_AVCTX
4241  /* no default bitrate if decoding */
4242  st->codec->bit_rate = 0;
4244 #endif
4245 
4246  /* default pts setting is MPEG-like */
4247  avpriv_set_pts_info(st, 33, 1, 90000);
4248  /* we set the current DTS to 0 so that formats without any timestamps
4249  * but durations get some timestamps, formats with some unknown
4250  * timestamps have their first few packets buffered and the
4251  * timestamps corrected before they are returned to the user */
4252  st->cur_dts = RELATIVE_TS_BASE;
4253  } else {
4254  st->cur_dts = AV_NOPTS_VALUE;
4255  }
4256 
4257  st->index = s->nb_streams;
4258  st->start_time = AV_NOPTS_VALUE;
4259  st->duration = AV_NOPTS_VALUE;
4260  st->first_dts = AV_NOPTS_VALUE;
4264 
4267  for (i = 0; i < MAX_REORDER_DELAY + 1; i++)
4268  st->pts_buffer[i] = AV_NOPTS_VALUE;
4269 
4270  st->sample_aspect_ratio = (AVRational) { 0, 1 };
4271 
4272 #if FF_API_R_FRAME_RATE
4273  st->info->last_dts = AV_NOPTS_VALUE;
4274 #endif
4277 
4279 
4280  st->internal->need_context_update = 1;
4281 
4282  s->streams[s->nb_streams++] = st;
4283  return st;
4284 fail:
4285  free_stream(&st);
4286  return NULL;
4287 }
4288 
4289 AVProgram *av_new_program(AVFormatContext *ac, int id)
4290 {
4291  AVProgram *program = NULL;
4292  int i;
4293 
4294  av_log(ac, AV_LOG_TRACE, "new_program: id=0x%04x\n", id);
4295 
4296  for (i = 0; i < ac->nb_programs; i++)
4297  if (ac->programs[i]->id == id)
4298  program = ac->programs[i];
4299 
4300  if (!program) {
4301  program = av_mallocz(sizeof(AVProgram));
4302  if (!program)
4303  return NULL;
4304  dynarray_add(&ac->programs, &ac->nb_programs, program);
4305  program->discard = AVDISCARD_NONE;
4306  }
4307  program->id = id;
4310 
4311  program->start_time =
4312  program->end_time = AV_NOPTS_VALUE;
4313 
4314  return program;
4315 }
4316 
4317 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base,
4318  int64_t start, int64_t end, const char *title)
4319 {
4320  AVChapter *chapter = NULL;
4321  int i;
4322 
4323  if (end != AV_NOPTS_VALUE && start > end) {
4324  av_log(s, AV_LOG_ERROR, "Chapter end time %"PRId64" before start %"PRId64"\n", end, start);
4325  return NULL;
4326  }
4327 
4328  for (i = 0; i < s->nb_chapters; i++)
4329  if (s->chapters[i]->id == id)
4330  chapter = s->chapters[i];
4331 
4332  if (!chapter) {
4333  chapter = av_mallocz(sizeof(AVChapter));
4334  if (!chapter)
4335  return NULL;
4336  dynarray_add(&s->chapters, &s->nb_chapters, chapter);
4337  }
4338  av_dict_set(&chapter->metadata, "title", title, 0);
4339  chapter->id = id;
4340  chapter->time_base = time_base;
4341  chapter->start = start;
4342  chapter->end = end;
4343 
4344  return chapter;
4345 }
4346 
4347 void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned idx)
4348 {
4349  int i, j;
4350  AVProgram *program = NULL;
4351  void *tmp;
4352 
4353  if (idx >= ac->nb_streams) {
4354  av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
4355  return;
4356  }
4357 
4358  for (i = 0; i < ac->nb_programs; i++) {
4359  if (ac->programs[i]->id != progid)
4360  continue;
4361  program = ac->programs[i];
4362  for (j = 0; j < program->nb_stream_indexes; j++)
4363  if (program->stream_index[j] == idx)
4364  return;
4365 
4366  tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
4367  if (!tmp)
4368  return;
4369  program->stream_index = tmp;
4370  program->stream_index[program->nb_stream_indexes++] = idx;
4371  return;
4372  }
4373 }
4374 
4375 uint64_t ff_ntp_time(void)
4376 {
4377  return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
4378 }
4379 
4380 int av_get_frame_filename2(char *buf, int buf_size, const char *path, int number, int flags)
4381 {
4382  const char *p;
4383  char *q, buf1[20], c;
4384  int nd, len, percentd_found;
4385 
4386  q = buf;
4387  p = path;
4388  percentd_found = 0;
4389  for (;;) {
4390  c = *p++;
4391  if (c == '\0')
4392  break;
4393  if (c == '%') {
4394  do {
4395  nd = 0;
4396  while (av_isdigit(*p))
4397  nd = nd * 10 + *p++ - '0';
4398  c = *p++;
4399  } while (av_isdigit(c));
4400 
4401  switch (c) {
4402  case '%':
4403  goto addchar;
4404  case 'd':
4405  if (!(flags & AV_FRAME_FILENAME_FLAGS_MULTIPLE) && percentd_found)
4406  goto fail;
4407  percentd_found = 1;
4408  if (number < 0)
4409  nd += 1;
4410  snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
4411  len = strlen(buf1);
4412  if ((q - buf + len) > buf_size - 1)
4413  goto fail;
4414  memcpy(q, buf1, len);
4415  q += len;
4416  break;
4417  default:
4418  goto fail;
4419  }
4420  } else {
4421 addchar:
4422  if ((q - buf) < buf_size - 1)
4423  *q++ = c;
4424  }
4425  }
4426  if (!percentd_found)
4427  goto fail;
4428  *q = '\0';
4429  return 0;
4430 fail:
4431  *q = '\0';
4432  return -1;
4433 }
4434 
4435 int av_get_frame_filename(char *buf, int buf_size, const char *path, int number)
4436 {
4437  return av_get_frame_filename2(buf, buf_size, path, number, 0);
4438 }
4439 
4440 void av_url_split(char *proto, int proto_size,
4441  char *authorization, int authorization_size,
4442  char *hostname, int hostname_size,
4443  int *port_ptr, char *path, int path_size, const char *url)
4444 {
4445  const char *p, *ls, *ls2, *at, *at2, *col, *brk;
4446 
4447  if (port_ptr)
4448  *port_ptr = -1;
4449  if (proto_size > 0)
4450  proto[0] = 0;
4451  if (authorization_size > 0)
4452  authorization[0] = 0;
4453  if (hostname_size > 0)
4454  hostname[0] = 0;
4455  if (path_size > 0)
4456  path[0] = 0;
4457 
4458  /* parse protocol */
4459  if ((p = strchr(url, ':'))) {
4460  av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
4461  p++; /* skip ':' */
4462  if (*p == '/')
4463  p++;
4464  if (*p == '/')
4465  p++;
4466  } else {
4467  /* no protocol means plain filename */
4468  av_strlcpy(path, url, path_size);
4469  return;
4470  }
4471 
4472  /* separate path from hostname */
4473  ls = strchr(p, '/');
4474  ls2 = strchr(p, '?');
4475  if (!ls)
4476  ls = ls2;
4477  else if (ls && ls2)
4478  ls = FFMIN(ls, ls2);
4479  if (ls)
4480  av_strlcpy(path, ls, path_size);
4481  else
4482  ls = &p[strlen(p)]; // XXX
4483 
4484  /* the rest is hostname, use that to parse auth/port */
4485  if (ls != p) {
4486  /* authorization (user[:pass]@hostname) */
4487  at2 = p;
4488  while ((at = strchr(p, '@')) && at < ls) {
4489  av_strlcpy(authorization, at2,
4490  FFMIN(authorization_size, at + 1 - at2));
4491  p = at + 1; /* skip '@' */
4492  }
4493 
4494  if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
4495  /* [host]:port */
4496  av_strlcpy(hostname, p + 1,
4497  FFMIN(hostname_size, brk - p));
4498  if (brk[1] == ':' && port_ptr)
4499  *port_ptr = atoi(brk + 2);
4500  } else if ((col = strchr(p, ':')) && col < ls) {
4501  av_strlcpy(hostname, p,
4502  FFMIN(col + 1 - p, hostname_size));
4503  if (port_ptr)
4504  *port_ptr = atoi(col + 1);
4505  } else
4506  av_strlcpy(hostname, p,
4507  FFMIN(ls + 1 - p, hostname_size));
4508  }
4509 }
4510 
4511 char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
4512 {
4513  int i;
4514  static const char hex_table_uc[16] = { '0', '1', '2', '3',
4515  '4', '5', '6', '7',
4516  '8', '9', 'A', 'B',
4517  'C', 'D', 'E', 'F' };
4518  static const char hex_table_lc[16] = { '0', '1', '2', '3',
4519  '4', '5', '6', '7',
4520  '8', '9', 'a', 'b',
4521  'c', 'd', 'e', 'f' };
4522  const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;
4523 
4524  for (i = 0; i < s; i++) {
4525  buff[i * 2] = hex_table[src[i] >> 4];
4526  buff[i * 2 + 1] = hex_table[src[i] & 0xF];
4527  }
4528 
4529  return buff;
4530 }
4531 
4532 int ff_hex_to_data(uint8_t *data, const char *p)
4533 {
4534  int c, len, v;
4535 
4536  len = 0;
4537  v = 1;
4538  for (;;) {
4539  p += strspn(p, SPACE_CHARS);
4540  if (*p == '\0')
4541  break;
4542  c = av_toupper((unsigned char) *p++);
4543  if (c >= '0' && c <= '9')
4544  c = c - '0';
4545  else if (c >= 'A' && c <= 'F')
4546  c = c - 'A' + 10;
4547  else
4548  break;
4549  v = (v << 4) | c;
4550  if (v & 0x100) {
4551  if (data)
4552  data[len] = v;
4553  len++;
4554  v = 1;
4555  }
4556  }
4557  return len;
4558 }
4559 
4560 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
4561  unsigned int pts_num, unsigned int pts_den)
4562 {
4563  AVRational new_tb;
4564  if (av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)) {
4565  if (new_tb.num != pts_num)
4567  "st:%d removing common factor %d from timebase\n",
4568  s->index, pts_num / new_tb.num);
4569  } else
4571  "st:%d has too large timebase, reducing\n", s->index);
4572 
4573  if (new_tb.num <= 0 || new_tb.den <= 0) {
4575  "Ignoring attempt to set invalid timebase %d/%d for st:%d\n",
4576  new_tb.num, new_tb.den,
4577  s->index);
4578  return;
4579  }
4580  s->time_base = new_tb;
4581 #if FF_API_LAVF_AVCTX
4583  av_codec_set_pkt_timebase(s->codec, new_tb);
4585 #endif
4587  s->pts_wrap_bits = pts_wrap_bits;
4588 }
4589 
4590 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
4591  void *context)
4592 {
4593  const char *ptr = str;
4594 
4595  /* Parse key=value pairs. */
4596  for (;;) {
4597  const char *key;
4598  char *dest = NULL, *dest_end;
4599  int key_len, dest_len = 0;
4600 
4601  /* Skip whitespace and potential commas. */
4602  while (*ptr && (av_isspace(*ptr) || *ptr == ','))
4603  ptr++;
4604  if (!*ptr)
4605  break;
4606 
4607  key = ptr;
4608 
4609  if (!(ptr = strchr(key, '=')))
4610  break;
4611  ptr++;
4612  key_len = ptr - key;
4613 
4614  callback_get_buf(context, key, key_len, &dest, &dest_len);
4615  dest_end = dest + dest_len - 1;
4616 
4617  if (*ptr == '\"') {
4618  ptr++;
4619  while (*ptr && *ptr != '\"') {
4620  if (*ptr == '\\') {
4621  if (!ptr[1])
4622  break;
4623  if (dest && dest < dest_end)
4624  *dest++ = ptr[1];
4625  ptr += 2;
4626  } else {
4627  if (dest && dest < dest_end)
4628  *dest++ = *ptr;
4629  ptr++;
4630  }
4631  }
4632  if (*ptr == '\"')
4633  ptr++;
4634  } else {
4635  for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
4636  if (dest && dest < dest_end)
4637  *dest++ = *ptr;
4638  }
4639  if (dest)
4640  *dest = 0;
4641  }
4642 }
4643 
4644 int ff_find_stream_index(AVFormatContext *s, int id)
4645 {
4646  int i;
4647  for (i = 0; i < s->nb_streams; i++)
4648  if (s->streams[i]->id == id)
4649  return i;
4650  return -1;
4651 }
4652 
4654  int std_compliance)
4655 {
4656  if (ofmt) {
4657  unsigned int codec_tag;
4658  if (ofmt->query_codec)
4659  return ofmt->query_codec(codec_id, std_compliance);
4660  else if (ofmt->codec_tag)
4661  return !!av_codec_get_tag2(ofmt->codec_tag, codec_id, &codec_tag);
4662  else if (codec_id == ofmt->video_codec ||
4663  codec_id == ofmt->audio_codec ||
4664  codec_id == ofmt->subtitle_codec ||
4665  codec_id == ofmt->data_codec)
4666  return 1;
4667  }
4668  return AVERROR_PATCHWELCOME;
4669 }
4670 
4672 {
4673 #if CONFIG_NETWORK
4674  int ret;
4676  if ((ret = ff_network_init()) < 0)
4677  return ret;
4678  if ((ret = ff_tls_init()) < 0)
4679  return ret;
4680 #endif
4681  return 0;
4682 }
4683 
4685 {
4686 #if CONFIG_NETWORK
4687  ff_network_close();
4688  ff_tls_deinit();
4690 #endif
4691  return 0;
4692 }
4693 
4695  uint64_t channel_layout, int32_t sample_rate,
4697 {
4698  uint32_t flags = 0;
4699  int size = 4;
4700  uint8_t *data;
4701  if (!pkt)
4702  return AVERROR(EINVAL);
4703  if (channels) {
4704  size += 4;
4706  }
4707  if (channel_layout) {
4708  size += 8;
4710  }
4711  if (sample_rate) {
4712  size += 4;
4714  }
4715  if (width || height) {
4716  size += 8;
4718  }
4720  if (!data)
4721  return AVERROR(ENOMEM);
4722  bytestream_put_le32(&data, flags);
4723  if (channels)
4724  bytestream_put_le32(&data, channels);
4725  if (channel_layout)
4726  bytestream_put_le64(&data, channel_layout);
4727  if (sample_rate)
4728  bytestream_put_le32(&data, sample_rate);
4729  if (width || height) {
4730  bytestream_put_le32(&data, width);
4731  bytestream_put_le32(&data, height);
4732  }
4733  return 0;
4734 }
4735 
4736 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4737 {
4738  AVRational undef = {0, 1};
4739  AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4740  AVRational codec_sample_aspect_ratio = stream && stream->codecpar ? stream->codecpar->sample_aspect_ratio : undef;
4741  AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4742 
4743  av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4744  stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4745  if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4746  stream_sample_aspect_ratio = undef;
4747 
4748  av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4749  frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4750  if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4751  frame_sample_aspect_ratio = undef;
4752 
4753  if (stream_sample_aspect_ratio.num)
4754  return stream_sample_aspect_ratio;
4755  else
4756  return frame_sample_aspect_ratio;
4757 }
4758