FFmpeg
dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 
31 #define INITIAL_BUFFER_SIZE 32768
32 #define MAX_BPRINT_READ_SIZE (UINT_MAX - 1)
33 #define DEFAULT_MANIFEST_SIZE 8 * 1024
34 
35 struct fragment {
36  int64_t url_offset;
37  int64_t size;
38  char *url;
39 };
40 
41 /*
42  * reference to : ISO_IEC_23009-1-DASH-2012
43  * Section: 5.3.9.6.2
44  * Table: Table 17 — Semantics of SegmentTimeline element
45  * */
46 struct timeline {
47  /* starttime: Element or Attribute Name
48  * specifies the MPD start time, in @timescale units,
49  * the first Segment in the series starts relative to the beginning of the Period.
50  * The value of this attribute must be equal to or greater than the sum of the previous S
51  * element earliest presentation time and the sum of the contiguous Segment durations.
52  * If the value of the attribute is greater than what is expressed by the previous S element,
53  * it expresses discontinuities in the timeline.
54  * If not present then the value shall be assumed to be zero for the first S element
55  * and for the subsequent S elements, the value shall be assumed to be the sum of
56  * the previous S element's earliest presentation time and contiguous duration
57  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
58  * */
59  int64_t starttime;
60  /* repeat: Element or Attribute Name
61  * specifies the repeat count of the number of following contiguous Segments with
62  * the same duration expressed by the value of @duration. This value is zero-based
63  * (e.g. a value of three means four Segments in the contiguous series).
64  * */
65  int64_t repeat;
66  /* duration: Element or Attribute Name
67  * specifies the Segment duration, in units of the value of the @timescale.
68  * */
69  int64_t duration;
70 };
71 
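/*
 * Illustrative mapping (example values, not taken from any real manifest):
 * an MPD SegmentTimeline such as
 *     <SegmentTimeline>
 *         <S t="0" d="90000" r="2"/>
 *         <S d="45000"/>
 *     </SegmentTimeline>
 * would be parsed into two struct timeline entries, in @timescale units:
 *     { .starttime = 0, .duration = 90000, .repeat = 2 }  // 3 segments: 0, 90000, 180000
 *     { .starttime = 0, .duration = 45000, .repeat = 0 }  // 1 segment, implicit start 270000
 */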
72 /*
73  * Each playlist has its own demuxer. If it is currently active,
74  * it has an opened AVIOContext too, and potentially an AVPacket
75  * containing the next packet from this stream.
76  */
77 struct representation {
78  char *url_template;
79  AVIOContext pb;
80  AVIOContext *input;
81  AVFormatContext *parent;
82  AVFormatContext *ctx;
83  int stream_index;
84 
85  char *id;
86  char *lang;
87  int bandwidth;
88  AVRational framerate;
89  AVStream *assoc_stream; /* demuxer stream associated with this representation */
90 
91  int n_fragments;
92  struct fragment **fragments; /* VOD list of fragment for profile */
93 
94  int n_timelines;
95  struct timeline **timelines;
96 
97  int64_t first_seq_no;
98  int64_t last_seq_no;
99  int64_t start_number; /* used with a dynamic segment list to know which segments are new */
100 
101  int64_t fragment_duration;
102  int64_t fragment_timescale;
103 
104  int64_t presentation_timeoffset;
105 
106  int64_t cur_seq_no;
107  int64_t cur_seg_offset;
108  int64_t cur_seg_size;
109  struct fragment *cur_seg;
110 
111  /* Currently active Media Initialization Section */
112  struct fragment *init_section;
113  uint8_t *init_sec_buf;
114  uint32_t init_sec_buf_size;
115  uint32_t init_sec_data_len;
116  uint32_t init_sec_buf_read_offset;
117  int64_t cur_timestamp;
118  int is_restart_needed;
119 };
120 
121 typedef struct DASHContext {
122  const AVClass *class;
123  char *base_url;
124 
125  int n_videos;
126  struct representation **videos;
127  int n_audios;
128  struct representation **audios;
129  int n_subtitles;
130  struct representation **subtitles;
131 
132  /* MediaPresentationDescription Attribute */
133  uint64_t media_presentation_duration;
134  uint64_t suggested_presentation_delay;
135  uint64_t availability_start_time;
136  uint64_t availability_end_time;
137  uint64_t publish_time;
138  uint64_t minimum_update_period;
139  uint64_t time_shift_buffer_depth;
140  uint64_t min_buffer_time;
141 
142  /* Period Attribute */
143  uint64_t period_duration;
144  uint64_t period_start;
145 
146  /* AdaptationSet Attribute */
147  char *adaptionset_lang;
148 
149  int is_live;
150  AVIOInterruptCB *interrupt_callback;
151  char *allowed_extensions;
152  AVDictionary *avio_opts;
153  int max_url_size;
154 
155  /* Flags for init section */
156  int is_init_section_common_video;
157  int is_init_section_common_audio;
158  int is_init_section_common_subtitle;
159 
160 } DASHContext;
161 
162 static int ishttp(char *url)
163 {
164  const char *proto_name = avio_find_protocol_name(url);
165  return proto_name && av_strstart(proto_name, "http", NULL);
166 }
167 
168 static int aligned(int val)
169 {
170  return ((val + 0x3F) >> 6) << 6;
171 }
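/*
 * Rounds up to the next multiple of 64; e.g. aligned(100) == 128 and
 * aligned(128) == 128 (illustrative values).
 */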
172 
173 static uint64_t get_current_time_in_sec(void)
174 {
175  return av_gettime() / 1000000;
176 }
177 
178 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
179 {
180  struct tm timeinfo;
181  int year = 0;
182  int month = 0;
183  int day = 0;
184  int hour = 0;
185  int minute = 0;
186  int ret = 0;
187  float second = 0.0;
188 
189  /* ISO-8601 date parser */
190  if (!datetime)
191  return 0;
192 
193  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
194  /* year, month, day, hour, minute, second 6 arguments */
195  if (ret != 6) {
196  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec get a wrong time format\n");
197  }
198  timeinfo.tm_year = year - 1900;
199  timeinfo.tm_mon = month - 1;
200  timeinfo.tm_mday = day;
201  timeinfo.tm_hour = hour;
202  timeinfo.tm_min = minute;
203  timeinfo.tm_sec = (int)second;
204 
205  return av_timegm(&timeinfo);
206 }
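/*
 * Example (hypothetical input): "2017-10-25T12:30:15Z" is scanned into the
 * six fields above and converted with av_timegm(), yielding the UTC epoch
 * seconds of that instant; fractional seconds are truncated.
 */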
207 
208 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
209 {
210  /* ISO-8601 duration parser */
211  uint32_t days = 0;
212  uint32_t hours = 0;
213  uint32_t mins = 0;
214  uint32_t secs = 0;
215  int size = 0;
216  float value = 0;
217  char type = '\0';
218  const char *ptr = duration;
219 
220  while (*ptr) {
221  if (*ptr == 'P' || *ptr == 'T') {
222  ptr++;
223  continue;
224  }
225 
226  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
227  av_log(s, AV_LOG_WARNING, "get_duration_insec get a wrong time format\n");
228  return 0; /* parser error */
229  }
230  switch (type) {
231  case 'D':
232  days = (uint32_t)value;
233  break;
234  case 'H':
235  hours = (uint32_t)value;
236  break;
237  case 'M':
238  mins = (uint32_t)value;
239  break;
240  case 'S':
241  secs = (uint32_t)value;
242  break;
243  default:
244  // handle invalid type
245  break;
246  }
247  ptr += size;
248  }
249  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
250 }
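/*
 * Worked example (illustrative): "PT1H30M5S" gives days=0, hours=1, mins=30,
 * secs=5, so ((0*24 + 1)*60 + 30)*60 + 5 = 5405 seconds; "P1DT10M" gives
 * ((1*24 + 0)*60 + 10)*60 + 0 = 87000 seconds.
 */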
251 
252 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
253 {
254  int64_t start_time = 0;
255  int64_t i = 0;
256  int64_t j = 0;
257  int64_t num = 0;
258 
259  if (pls->n_timelines) {
260  for (i = 0; i < pls->n_timelines; i++) {
261  if (pls->timelines[i]->starttime > 0) {
262  start_time = pls->timelines[i]->starttime;
263  }
264  if (num == cur_seq_no)
265  goto finish;
266 
267  start_time += pls->timelines[i]->duration;
268 
269  if (pls->timelines[i]->repeat == -1) {
270  start_time = pls->timelines[i]->duration * cur_seq_no;
271  goto finish;
272  }
273 
274  for (j = 0; j < pls->timelines[i]->repeat; j++) {
275  num++;
276  if (num == cur_seq_no)
277  goto finish;
278  start_time += pls->timelines[i]->duration;
279  }
280  num++;
281  }
282  }
283 finish:
284  return start_time;
285 }
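/*
 * Worked example (using the illustrative timeline above: one S with t=0,
 * d=90000, r=2 followed by one S with d=45000): cur_seq_no == 3 walks the
 * first entry and its two repeats, accumulating start_time = 270000, and
 * returns 270000, i.e. the start of the fourth segment in timescale units.
 */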
286 
287 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
288 {
289  int64_t i = 0;
290  int64_t j = 0;
291  int64_t num = 0;
292  int64_t start_time = 0;
293 
294  for (i = 0; i < pls->n_timelines; i++) {
295  if (pls->timelines[i]->starttime > 0) {
296  start_time = pls->timelines[i]->starttime;
297  }
298  if (start_time > cur_time)
299  goto finish;
300 
301  start_time += pls->timelines[i]->duration;
302  for (j = 0; j < pls->timelines[i]->repeat; j++) {
303  num++;
304  if (start_time > cur_time)
305  goto finish;
306  start_time += pls->timelines[i]->duration;
307  }
308  num++;
309  }
310 
311  return -1;
312 
313 finish:
314  return num;
315 }
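/*
 * Worked example (same illustrative timeline): cur_time = 100000 returns 2,
 * because the running start_time first exceeds cur_time (at 180000) while
 * counting the segment with index 2; a cur_time beyond the whole timeline
 * falls out of the loop and returns -1.
 */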
316 
317 static void free_fragment(struct fragment **seg)
318 {
319  if (!(*seg)) {
320  return;
321  }
322  av_freep(&(*seg)->url);
323  av_freep(seg);
324 }
325 
326 static void free_fragment_list(struct representation *pls)
327 {
328  int i;
329 
330  for (i = 0; i < pls->n_fragments; i++) {
331  free_fragment(&pls->fragments[i]);
332  }
333  av_freep(&pls->fragments);
334  pls->n_fragments = 0;
335 }
336 
337 static void free_timelines_list(struct representation *pls)
338 {
339  int i;
340 
341  for (i = 0; i < pls->n_timelines; i++) {
342  av_freep(&pls->timelines[i]);
343  }
344  av_freep(&pls->timelines);
345  pls->n_timelines = 0;
346 }
347 
348 static void free_representation(struct representation *pls)
349 {
350  free_fragment_list(pls);
351  free_timelines_list(pls);
352  free_fragment(&pls->cur_seg);
354  av_freep(&pls->init_sec_buf);
355  av_freep(&pls->pb.buffer);
356  ff_format_io_close(pls->parent, &pls->input);
357  if (pls->ctx) {
358  pls->ctx->pb = NULL;
359  avformat_close_input(&pls->ctx);
360  }
361 
362  av_freep(&pls->url_template);
363  av_freep(&pls->lang);
364  av_freep(&pls->id);
365  av_freep(&pls);
366 }
367 
368 static void free_video_list(DASHContext *c)
369 {
370  int i;
371  for (i = 0; i < c->n_videos; i++) {
372  struct representation *pls = c->videos[i];
373  free_representation(pls);
374  }
375  av_freep(&c->videos);
376  c->n_videos = 0;
377 }
378 
379 static void free_audio_list(DASHContext *c)
380 {
381  int i;
382  for (i = 0; i < c->n_audios; i++) {
383  struct representation *pls = c->audios[i];
384  free_representation(pls);
385  }
386  av_freep(&c->audios);
387  c->n_audios = 0;
388 }
389 
390 static void free_subtitles_list(DASHContext *c)
391 {
392  int i;
393  for (i = 0; i < c->n_subtitles; i++) {
394  struct representation *pls = c->subtitles[i];
395  free_representation(pls);
396  }
397  av_freep(&c->subtitles);
398  c->n_subtitles = 0;
399 }
400 
401 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
402  AVDictionary **opts, AVDictionary *opts2, int *is_http)
403 {
404  DASHContext *c = s->priv_data;
405  AVDictionary *tmp = NULL;
406  const char *proto_name = NULL;
407  int ret;
408 
409  if (av_strstart(url, "crypto", NULL)) {
410  if (url[6] == '+' || url[6] == ':')
411  proto_name = avio_find_protocol_name(url + 7);
412  }
413 
414  if (!proto_name)
415  proto_name = avio_find_protocol_name(url);
416 
417  if (!proto_name)
418  return AVERROR_INVALIDDATA;
419 
420  // only http(s) & file are allowed
421  if (av_strstart(proto_name, "file", NULL)) {
422  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
423  av_log(s, AV_LOG_ERROR,
424  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
425  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
426  url);
427  return AVERROR_INVALIDDATA;
428  }
429  } else if (av_strstart(proto_name, "http", NULL)) {
430  ;
431  } else
432  return AVERROR_INVALIDDATA;
433 
434  if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
435  ;
436  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
437  ;
438  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
439  return AVERROR_INVALIDDATA;
440 
441  av_freep(pb);
442  av_dict_copy(&tmp, *opts, 0);
443  av_dict_copy(&tmp, opts2, 0);
444  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
445  if (ret >= 0) {
446  // update cookies on http response with setcookies.
447  char *new_cookies = NULL;
448 
449  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
450  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
451 
452  if (new_cookies) {
453  av_dict_set(opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
454  }
455 
456  }
457 
458  av_dict_free(&tmp);
459 
460  if (is_http)
461  *is_http = av_strstart(proto_name, "http", NULL);
462 
463  return ret;
464 }
465 
466 static char *get_content_url(xmlNodePtr *baseurl_nodes,
467  int n_baseurl_nodes,
468  int max_url_size,
469  char *rep_id_val,
470  char *rep_bandwidth_val,
471  char *val)
472 {
473  int i;
474  char *text;
475  char *url = NULL;
476  char *tmp_str = av_mallocz(max_url_size);
477 
478  if (!tmp_str)
479  return NULL;
480 
481  for (i = 0; i < n_baseurl_nodes; ++i) {
482  if (baseurl_nodes[i] &&
483  baseurl_nodes[i]->children &&
484  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
485  text = xmlNodeGetContent(baseurl_nodes[i]->children);
486  if (text) {
487  memset(tmp_str, 0, max_url_size);
488  ff_make_absolute_url(tmp_str, max_url_size, "", text);
489  xmlFree(text);
490  }
491  }
492  }
493 
494  if (val)
495  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
496 
497  if (rep_id_val) {
498  url = av_strireplace(tmp_str, "$RepresentationID$", rep_id_val);
499  if (!url) {
500  goto end;
501  }
502  av_strlcpy(tmp_str, url, max_url_size);
503  }
504  if (rep_bandwidth_val && tmp_str[0] != '\0') {
505  // free any previously assigned url before reassigning
506  av_free(url);
507  url = av_strireplace(tmp_str, "$Bandwidth$", rep_bandwidth_val);
508  if (!url) {
509  goto end;
510  }
511  }
512 end:
513  av_free(tmp_str);
514  return url;
515 }
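/*
 * Illustrative substitution (hypothetical values): with a resolved BaseURL of
 * "http://example.com/video/", val = "$RepresentationID$/init.mp4" and
 * rep_id_val = "720p", the returned URL is
 * "http://example.com/video/720p/init.mp4"; $Bandwidth$ is replaced the same
 * way when rep_bandwidth_val is set.
 */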
516 
517 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
518 {
519  int i;
520  char *val;
521 
522  for (i = 0; i < n_nodes; ++i) {
523  if (nodes[i]) {
524  val = xmlGetProp(nodes[i], attrname);
525  if (val)
526  return val;
527  }
528  }
529 
530  return NULL;
531 }
532 
533 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
534 {
535  xmlNodePtr node = rootnode;
536  if (!node) {
537  return NULL;
538  }
539 
540  node = xmlFirstElementChild(node);
541  while (node) {
542  if (!av_strcasecmp(node->name, nodename)) {
543  return node;
544  }
545  node = xmlNextElementSibling(node);
546  }
547  return NULL;
548 }
549 
550 static enum AVMediaType get_content_type(xmlNodePtr node)
551 {
552  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
553  int i = 0;
554  const char *attr;
555  char *val = NULL;
556 
557  if (node) {
558  for (i = 0; i < 2; i++) {
559  attr = i ? "mimeType" : "contentType";
560  val = xmlGetProp(node, attr);
561  if (val) {
562  if (av_stristr(val, "video")) {
563  type = AVMEDIA_TYPE_VIDEO;
564  } else if (av_stristr(val, "audio")) {
565  type = AVMEDIA_TYPE_AUDIO;
566  } else if (av_stristr(val, "text")) {
567  type = AVMEDIA_TYPE_SUBTITLE;
568  }
569  xmlFree(val);
570  }
571  }
572  }
573  return type;
574 }
575 
576 static struct fragment * get_Fragment(char *range)
577 {
578  struct fragment * seg = av_mallocz(sizeof(struct fragment));
579 
580  if (!seg)
581  return NULL;
582 
583  seg->size = -1;
584  if (range) {
585  char *str_end_offset;
586  char *str_offset = av_strtok(range, "-", &str_end_offset);
587  seg->url_offset = strtoll(str_offset, NULL, 10);
588  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset + 1;
589  }
590 
591  return seg;
592 }
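/*
 * Example (illustrative): a range string of "100-499" yields
 * url_offset = 100 and size = 499 - 100 + 1 = 400 bytes; with no range the
 * size stays -1, meaning "the whole resource".
 */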
593 
594 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
595  xmlNodePtr fragmenturl_node,
596  xmlNodePtr *baseurl_nodes,
597  char *rep_id_val,
598  char *rep_bandwidth_val)
599 {
600  DASHContext *c = s->priv_data;
601  char *initialization_val = NULL;
602  char *media_val = NULL;
603  char *range_val = NULL;
604  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
605  int err;
606 
607  if (!av_strcasecmp(fragmenturl_node->name, "Initialization")) {
608  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
609  range_val = xmlGetProp(fragmenturl_node, "range");
610  if (initialization_val || range_val) {
611  free_fragment(&rep->init_section);
612  rep->init_section = get_Fragment(range_val);
613  xmlFree(range_val);
614  if (!rep->init_section) {
615  xmlFree(initialization_val);
616  return AVERROR(ENOMEM);
617  }
618  rep->init_section->url = get_content_url(baseurl_nodes, 4,
619  max_url_size,
620  rep_id_val,
621  rep_bandwidth_val,
622  initialization_val);
623  xmlFree(initialization_val);
624  if (!rep->init_section->url) {
625  av_freep(&rep->init_section);
626  return AVERROR(ENOMEM);
627  }
628  }
629  } else if (!av_strcasecmp(fragmenturl_node->name, "SegmentURL")) {
630  media_val = xmlGetProp(fragmenturl_node, "media");
631  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
632  if (media_val || range_val) {
633  struct fragment *seg = get_Fragment(range_val);
634  xmlFree(range_val);
635  if (!seg) {
636  xmlFree(media_val);
637  return AVERROR(ENOMEM);
638  }
639  seg->url = get_content_url(baseurl_nodes, 4,
640  max_url_size,
641  rep_id_val,
642  rep_bandwidth_val,
643  media_val);
644  xmlFree(media_val);
645  if (!seg->url) {
646  av_free(seg);
647  return AVERROR(ENOMEM);
648  }
649  err = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
650  if (err < 0) {
651  free_fragment(&seg);
652  return err;
653  }
654  }
655  }
656 
657  return 0;
658 }
659 
660 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
661  xmlNodePtr fragment_timeline_node)
662 {
663  xmlAttrPtr attr = NULL;
664  char *val = NULL;
665  int err;
666 
667  if (!av_strcasecmp(fragment_timeline_node->name, "S")) {
668  struct timeline *tml = av_mallocz(sizeof(struct timeline));
669  if (!tml) {
670  return AVERROR(ENOMEM);
671  }
672  attr = fragment_timeline_node->properties;
673  while (attr) {
674  val = xmlGetProp(fragment_timeline_node, attr->name);
675 
676  if (!val) {
677  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
678  continue;
679  }
680 
681  if (!av_strcasecmp(attr->name, "t")) {
682  tml->starttime = (int64_t)strtoll(val, NULL, 10);
683  } else if (!av_strcasecmp(attr->name, "r")) {
684  tml->repeat =(int64_t) strtoll(val, NULL, 10);
685  } else if (!av_strcasecmp(attr->name, "d")) {
686  tml->duration = (int64_t)strtoll(val, NULL, 10);
687  }
688  attr = attr->next;
689  xmlFree(val);
690  }
691  err = av_dynarray_add_nofree(&rep->timelines, &rep->n_timelines, tml);
692  if (err < 0) {
693  av_free(tml);
694  return err;
695  }
696  }
697 
698  return 0;
699 }
700 
701 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
702 {
703  char *tmp_str = NULL;
704  char *path = NULL;
705  char *mpdName = NULL;
706  xmlNodePtr node = NULL;
707  char *baseurl = NULL;
708  char *root_url = NULL;
709  char *text = NULL;
710  char *tmp = NULL;
711  int isRootHttp = 0;
712  char token ='/';
713  int start = 0;
714  int rootId = 0;
715  int updated = 0;
716  int size = 0;
717  int i;
718  int tmp_max_url_size = strlen(url);
719 
720  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
721  text = xmlNodeGetContent(baseurl_nodes[i]);
722  if (!text)
723  continue;
724  tmp_max_url_size += strlen(text);
725  if (ishttp(text)) {
726  xmlFree(text);
727  break;
728  }
729  xmlFree(text);
730  }
731 
732  tmp_max_url_size = aligned(tmp_max_url_size);
733  text = av_mallocz(tmp_max_url_size);
734  if (!text) {
735  updated = AVERROR(ENOMEM);
736  goto end;
737  }
738  av_strlcpy(text, url, strlen(url)+1);
739  tmp = text;
740  while (mpdName = av_strtok(tmp, "/", &tmp)) {
741  size = strlen(mpdName);
742  }
743  av_free(text);
744 
745  path = av_mallocz(tmp_max_url_size);
746  tmp_str = av_mallocz(tmp_max_url_size);
747  if (!tmp_str || !path) {
748  updated = AVERROR(ENOMEM);
749  goto end;
750  }
751 
752  av_strlcpy (path, url, strlen(url) - size + 1);
753  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
754  if (!(node = baseurl_nodes[rootId])) {
755  continue;
756  }
757  text = xmlNodeGetContent(node);
758  if (ishttp(text)) {
759  xmlFree(text);
760  break;
761  }
762  xmlFree(text);
763  }
764 
765  node = baseurl_nodes[rootId];
766  baseurl = xmlNodeGetContent(node);
767  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
768  if (node) {
769  xmlNodeSetContent(node, root_url);
770  updated = 1;
771  }
772 
773  size = strlen(root_url);
774  isRootHttp = ishttp(root_url);
775 
776  if (size > 0 && root_url[size - 1] != token) {
777  av_strlcat(root_url, "/", size + 2);
778  size += 2;
779  }
780 
781  for (i = 0; i < n_baseurl_nodes; ++i) {
782  if (i == rootId) {
783  continue;
784  }
785  text = xmlNodeGetContent(baseurl_nodes[i]);
786  if (text && !av_strstart(text, "/", NULL)) {
787  memset(tmp_str, 0, strlen(tmp_str));
788  if (!ishttp(text) && isRootHttp) {
789  av_strlcpy(tmp_str, root_url, size + 1);
790  }
791  start = (text[0] == token);
792  if (start && av_stristr(tmp_str, text)) {
793  char *p = tmp_str;
794  if (!av_strncasecmp(tmp_str, "http://", 7)) {
795  p += 7;
796  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
797  p += 8;
798  }
799  p = strchr(p, '/');
800  memset(p + 1, 0, strlen(p));
801  }
802  av_strlcat(tmp_str, text + start, tmp_max_url_size);
803  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
804  updated = 1;
805  xmlFree(text);
806  }
807  }
808 
809 end:
810  if (tmp_max_url_size > *max_url_size) {
811  *max_url_size = tmp_max_url_size;
812  }
813  av_free(path);
814  av_free(tmp_str);
815  xmlFree(baseurl);
816  return updated;
817 
818 }
819 
820 static int parse_manifest_representation(AVFormatContext *s, const char *url,
821  xmlNodePtr node,
822  xmlNodePtr adaptionset_node,
823  xmlNodePtr mpd_baseurl_node,
824  xmlNodePtr period_baseurl_node,
825  xmlNodePtr period_segmenttemplate_node,
826  xmlNodePtr period_segmentlist_node,
827  xmlNodePtr fragment_template_node,
828  xmlNodePtr content_component_node,
829  xmlNodePtr adaptionset_baseurl_node,
830  xmlNodePtr adaptionset_segmentlist_node,
831  xmlNodePtr adaptionset_supplementalproperty_node)
832 {
833  int32_t ret = 0;
834  DASHContext *c = s->priv_data;
835  struct representation *rep = NULL;
836  struct fragment *seg = NULL;
837  xmlNodePtr representation_segmenttemplate_node = NULL;
838  xmlNodePtr representation_baseurl_node = NULL;
839  xmlNodePtr representation_segmentlist_node = NULL;
840  xmlNodePtr segmentlists_tab[3];
841  xmlNodePtr fragment_timeline_node = NULL;
842  xmlNodePtr fragment_templates_tab[5];
843  char *val = NULL;
844  xmlNodePtr baseurl_nodes[4];
845  xmlNodePtr representation_node = node;
846  char *rep_bandwidth_val;
847  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
848 
849  // try to get information from the Representation
850  if (type == AVMEDIA_TYPE_UNKNOWN)
851  type = get_content_type(representation_node);
852  // try to get information from the ContentComponent
853  if (type == AVMEDIA_TYPE_UNKNOWN)
854  type = get_content_type(content_component_node);
855  // try to get information from the AdaptationSet
856  if (type == AVMEDIA_TYPE_UNKNOWN)
857  type = get_content_type(adaptionset_node);
858 
859  if (type == AVMEDIA_TYPE_UNKNOWN) {
860  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipp not supported representation type\n", url);
861  return 0;
862  }
863 
864  // convert selected representation to our internal struct
865  rep = av_mallocz(sizeof(struct representation));
866  if (!rep)
867  return AVERROR(ENOMEM);
868  if (c->adaptionset_lang) {
869  rep->lang = av_strdup(c->adaptionset_lang);
870  if (!rep->lang) {
871  av_log(s, AV_LOG_ERROR, "alloc language memory failure\n");
872  av_freep(&rep);
873  return AVERROR(ENOMEM);
874  }
875  }
876  rep->parent = s;
877  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
878  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
879  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
880  rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
881  val = xmlGetProp(representation_node, "id");
882  if (val) {
883  rep->id = av_strdup(val);
884  xmlFree(val);
885  if (!rep->id)
886  goto enomem;
887  }
888 
889  baseurl_nodes[0] = mpd_baseurl_node;
890  baseurl_nodes[1] = period_baseurl_node;
891  baseurl_nodes[2] = adaptionset_baseurl_node;
892  baseurl_nodes[3] = representation_baseurl_node;
893 
894  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
895  c->max_url_size = aligned(c->max_url_size
896  + (rep->id ? strlen(rep->id) : 0)
897  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
898  if (ret == AVERROR(ENOMEM) || ret == 0)
899  goto free;
900  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
901  fragment_timeline_node = NULL;
902  fragment_templates_tab[0] = representation_segmenttemplate_node;
903  fragment_templates_tab[1] = adaptionset_segmentlist_node;
904  fragment_templates_tab[2] = fragment_template_node;
905  fragment_templates_tab[3] = period_segmenttemplate_node;
906  fragment_templates_tab[4] = period_segmentlist_node;
907 
908  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
909  if (val) {
910  rep->init_section = av_mallocz(sizeof(struct fragment));
911  if (!rep->init_section) {
912  xmlFree(val);
913  goto enomem;
914  }
915  c->max_url_size = aligned(c->max_url_size + strlen(val));
916  rep->init_section->url = get_content_url(baseurl_nodes, 4,
917  c->max_url_size, rep->id,
918  rep_bandwidth_val, val);
919  xmlFree(val);
920  if (!rep->init_section->url)
921  goto enomem;
922  rep->init_section->size = -1;
923  }
924  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
925  if (val) {
926  c->max_url_size = aligned(c->max_url_size + strlen(val));
927  rep->url_template = get_content_url(baseurl_nodes, 4,
928  c->max_url_size, rep->id,
929  rep_bandwidth_val, val);
930  xmlFree(val);
931  }
932  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
933  if (val) {
934  rep->presentation_timeoffset = (int64_t) strtoll(val, NULL, 10);
935  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
936  xmlFree(val);
937  }
938  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
939  if (val) {
940  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
941  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
942  xmlFree(val);
943  }
944  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
945  if (val) {
946  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
947  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
948  xmlFree(val);
949  }
950  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
951  if (val) {
952  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
953  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
954  xmlFree(val);
955  }
956  if (adaptionset_supplementalproperty_node) {
957  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
958  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
959  if (!val) {
960  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
961  } else {
962  rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
963  xmlFree(val);
964  }
965  }
966  }
967 
968  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
969 
970  if (!fragment_timeline_node)
971  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
972  if (!fragment_timeline_node)
973  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
974  if (!fragment_timeline_node)
975  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
976  if (fragment_timeline_node) {
977  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
978  while (fragment_timeline_node) {
979  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
980  if (ret < 0)
981  goto free;
982  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
983  }
984  }
985  } else if (representation_baseurl_node && !representation_segmentlist_node) {
986  seg = av_mallocz(sizeof(struct fragment));
987  if (!seg)
988  goto enomem;
989  ret = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
990  if (ret < 0) {
991  av_free(seg);
992  goto free;
993  }
994  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size,
995  rep->id, rep_bandwidth_val, NULL);
996  if (!seg->url)
997  goto enomem;
998  seg->size = -1;
999  } else if (representation_segmentlist_node) {
1000  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
1001  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
1002  xmlNodePtr fragmenturl_node = NULL;
1003  segmentlists_tab[0] = representation_segmentlist_node;
1004  segmentlists_tab[1] = adaptionset_segmentlist_node;
1005  segmentlists_tab[2] = period_segmentlist_node;
1006 
1007  val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
1008  if (val) {
1009  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
1010  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
1011  xmlFree(val);
1012  }
1013  val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1014  if (val) {
1015  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
1016  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1017  xmlFree(val);
1018  }
1019  val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1020  if (val) {
1021  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
1022  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1023  xmlFree(val);
1024  }
1025 
1026  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1027  while (fragmenturl_node) {
1028  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1029  baseurl_nodes, rep->id,
1030  rep_bandwidth_val);
1031  if (ret < 0)
1032  goto free;
1033  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1034  }
1035 
1036  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1037  if (!fragment_timeline_node)
1038  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1039  if (fragment_timeline_node) {
1040  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1041  while (fragment_timeline_node) {
1042  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1043  if (ret < 0)
1044  goto free;
1045  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1046  }
1047  }
1048  } else {
1049  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id '%s' \n",
1050  rep->id ? rep->id : "");
1051  goto free;
1052  }
1053 
1054  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1055  rep->fragment_timescale = 1;
1056  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1057  rep->framerate = av_make_q(0, 0);
1058  if (type == AVMEDIA_TYPE_VIDEO) {
1059  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
1060  if (rep_framerate_val) {
1061  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1062  if (ret < 0)
1063  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1064  xmlFree(rep_framerate_val);
1065  }
1066  }
1067 
1068  switch (type) {
1069  case AVMEDIA_TYPE_VIDEO:
1070  ret = av_dynarray_add_nofree(&c->videos, &c->n_videos, rep);
1071  break;
1072  case AVMEDIA_TYPE_AUDIO:
1073  ret = av_dynarray_add_nofree(&c->audios, &c->n_audios, rep);
1074  break;
1075  case AVMEDIA_TYPE_SUBTITLE:
1076  ret = av_dynarray_add_nofree(&c->subtitles, &c->n_subtitles, rep);
1077  break;
1078  }
1079  if (ret < 0)
1080  goto free;
1081 
1082 end:
1083  if (rep_bandwidth_val)
1084  xmlFree(rep_bandwidth_val);
1085 
1086  return ret;
1087 enomem:
1088  ret = AVERROR(ENOMEM);
1089 free:
1090  free_representation(rep);
1091  goto end;
1092 }
1093 
1094 static int parse_manifest_adaptationset_attr(AVFormatContext *s, xmlNodePtr adaptionset_node)
1095 {
1096  DASHContext *c = s->priv_data;
1097 
1098  if (!adaptionset_node) {
1099  av_log(s, AV_LOG_WARNING, "Cannot get AdaptionSet\n");
1100  return AVERROR(EINVAL);
1101  }
1102  c->adaptionset_lang = xmlGetProp(adaptionset_node, "lang");
1103 
1104  return 0;
1105 }
1106 
1107 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1108  xmlNodePtr adaptionset_node,
1109  xmlNodePtr mpd_baseurl_node,
1110  xmlNodePtr period_baseurl_node,
1111  xmlNodePtr period_segmenttemplate_node,
1112  xmlNodePtr period_segmentlist_node)
1113 {
1114  int ret = 0;
1115  DASHContext *c = s->priv_data;
1116  xmlNodePtr fragment_template_node = NULL;
1117  xmlNodePtr content_component_node = NULL;
1118  xmlNodePtr adaptionset_baseurl_node = NULL;
1119  xmlNodePtr adaptionset_segmentlist_node = NULL;
1120  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1121  xmlNodePtr node = NULL;
1122 
1123  ret = parse_manifest_adaptationset_attr(s, adaptionset_node);
1124  if (ret < 0)
1125  return ret;
1126 
1127  node = xmlFirstElementChild(adaptionset_node);
1128  while (node) {
1129  if (!av_strcasecmp(node->name, "SegmentTemplate")) {
1130  fragment_template_node = node;
1131  } else if (!av_strcasecmp(node->name, "ContentComponent")) {
1132  content_component_node = node;
1133  } else if (!av_strcasecmp(node->name, "BaseURL")) {
1134  adaptionset_baseurl_node = node;
1135  } else if (!av_strcasecmp(node->name, "SegmentList")) {
1136  adaptionset_segmentlist_node = node;
1137  } else if (!av_strcasecmp(node->name, "SupplementalProperty")) {
1138  adaptionset_supplementalproperty_node = node;
1139  } else if (!av_strcasecmp(node->name, "Representation")) {
1140  ret = parse_manifest_representation(s, url, node,
1141  adaptionset_node,
1142  mpd_baseurl_node,
1143  period_baseurl_node,
1144  period_segmenttemplate_node,
1145  period_segmentlist_node,
1146  fragment_template_node,
1147  content_component_node,
1148  adaptionset_baseurl_node,
1149  adaptionset_segmentlist_node,
1150  adaptionset_supplementalproperty_node);
1151  if (ret < 0)
1152  goto err;
1153  }
1154  node = xmlNextElementSibling(node);
1155  }
1156 
1157 err:
1158  xmlFree(c->adaptionset_lang);
1159  c->adaptionset_lang = NULL;
1160  return ret;
1161 }
1162 
1163 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1164 {
1165  xmlChar *val = NULL;
1166 
1167  node = xmlFirstElementChild(node);
1168  while (node) {
1169  if (!av_strcasecmp(node->name, "Title")) {
1170  val = xmlNodeGetContent(node);
1171  if (val) {
1172  av_dict_set(&s->metadata, "Title", val, 0);
1173  }
1174  } else if (!av_strcasecmp(node->name, "Source")) {
1175  val = xmlNodeGetContent(node);
1176  if (val) {
1177  av_dict_set(&s->metadata, "Source", val, 0);
1178  }
1179  } else if (!av_strcasecmp(node->name, "Copyright")) {
1180  val = xmlNodeGetContent(node);
1181  if (val) {
1182  av_dict_set(&s->metadata, "Copyright", val, 0);
1183  }
1184  }
1185  node = xmlNextElementSibling(node);
1186  xmlFree(val);
1187  val = NULL;
1188  }
1189  return 0;
1190 }
1191 
1192 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1193 {
1194  DASHContext *c = s->priv_data;
1195  int ret = 0;
1196  int close_in = 0;
1197  int64_t filesize = 0;
1198  AVBPrint buf;
1199  AVDictionary *opts = NULL;
1200  xmlDoc *doc = NULL;
1201  xmlNodePtr root_element = NULL;
1202  xmlNodePtr node = NULL;
1203  xmlNodePtr period_node = NULL;
1204  xmlNodePtr tmp_node = NULL;
1205  xmlNodePtr mpd_baseurl_node = NULL;
1206  xmlNodePtr period_baseurl_node = NULL;
1207  xmlNodePtr period_segmenttemplate_node = NULL;
1208  xmlNodePtr period_segmentlist_node = NULL;
1209  xmlNodePtr adaptionset_node = NULL;
1210  xmlAttrPtr attr = NULL;
1211  char *val = NULL;
1212  uint32_t period_duration_sec = 0;
1213  uint32_t period_start_sec = 0;
1214 
1215  if (!in) {
1216  close_in = 1;
1217 
1218  av_dict_copy(&opts, c->avio_opts, 0);
1219  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1220  av_dict_free(&opts);
1221  if (ret < 0)
1222  return ret;
1223  }
1224 
1225  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&c->base_url) < 0)
1226  c->base_url = av_strdup(url);
1227 
1228  filesize = avio_size(in);
1229  filesize = filesize > 0 ? filesize : DEFAULT_MANIFEST_SIZE;
1230 
1231  if (filesize > MAX_BPRINT_READ_SIZE) {
1232  av_log(s, AV_LOG_ERROR, "Manifest too large: %"PRId64"\n", filesize);
1233  return AVERROR_INVALIDDATA;
1234  }
1235 
1236  av_bprint_init(&buf, filesize + 1, AV_BPRINT_SIZE_UNLIMITED);
1237 
1238  if ((ret = avio_read_to_bprint(in, &buf, MAX_BPRINT_READ_SIZE)) < 0 ||
1239  !avio_feof(in) ||
1240  (filesize = buf.len) == 0) {
1241  av_log(s, AV_LOG_ERROR, "Unable to read to manifest '%s'\n", url);
1242  if (ret == 0)
1243  ret = AVERROR_INVALIDDATA;
1244  } else {
1245  LIBXML_TEST_VERSION
1246 
1247  doc = xmlReadMemory(buf.str, filesize, c->base_url, NULL, 0);
1248  root_element = xmlDocGetRootElement(doc);
1249  node = root_element;
1250 
1251  if (!node) {
1252  ret = AVERROR_INVALIDDATA;
1253  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1254  goto cleanup;
1255  }
1256 
1257  if (node->type != XML_ELEMENT_NODE ||
1258  av_strcasecmp(node->name, "MPD")) {
1259  ret = AVERROR_INVALIDDATA;
1260  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1261  goto cleanup;
1262  }
1263 
1264  val = xmlGetProp(node, "type");
1265  if (!val) {
1266  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1267  ret = AVERROR_INVALIDDATA;
1268  goto cleanup;
1269  }
1270  if (!av_strcasecmp(val, "dynamic"))
1271  c->is_live = 1;
1272  xmlFree(val);
1273 
1274  attr = node->properties;
1275  while (attr) {
1276  val = xmlGetProp(node, attr->name);
1277 
1278  if (!av_strcasecmp(attr->name, "availabilityStartTime")) {
1279  c->availability_start_time = get_utc_date_time_insec(s, val);
1280  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1281  } else if (!av_strcasecmp(attr->name, "availabilityEndTime")) {
1282  c->availability_end_time = get_utc_date_time_insec(s, val);
1283  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1284  } else if (!av_strcasecmp(attr->name, "publishTime")) {
1285  c->publish_time = get_utc_date_time_insec(s, val);
1286  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1287  } else if (!av_strcasecmp(attr->name, "minimumUpdatePeriod")) {
1288  c->minimum_update_period = get_duration_insec(s, val);
1289  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1290  } else if (!av_strcasecmp(attr->name, "timeShiftBufferDepth")) {
1291  c->time_shift_buffer_depth = get_duration_insec(s, val);
1292  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1293  } else if (!av_strcasecmp(attr->name, "minBufferTime")) {
1294  c->min_buffer_time = get_duration_insec(s, val);
1295  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1296  } else if (!av_strcasecmp(attr->name, "suggestedPresentationDelay")) {
1297  c->suggested_presentation_delay = get_duration_insec(s, val);
1298  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1299  } else if (!av_strcasecmp(attr->name, "mediaPresentationDuration")) {
1300  c->media_presentation_duration = get_duration_insec(s, val);
1301  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1302  }
1303  attr = attr->next;
1304  xmlFree(val);
1305  }
1306 
1307  tmp_node = find_child_node_by_name(node, "BaseURL");
1308  if (tmp_node) {
1309  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1310  } else {
1311  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1312  }
1313 
1314  // for now we can handle only one Period, the one with the longest duration
1315  node = xmlFirstElementChild(node);
1316  while (node) {
1317  if (!av_strcasecmp(node->name, "Period")) {
1318  period_duration_sec = 0;
1319  period_start_sec = 0;
1320  attr = node->properties;
1321  while (attr) {
1322  val = xmlGetProp(node, attr->name);
1323  if (!av_strcasecmp(attr->name, "duration")) {
1324  period_duration_sec = get_duration_insec(s, val);
1325  } else if (!av_strcasecmp(attr->name, "start")) {
1326  period_start_sec = get_duration_insec(s, val);
1327  }
1328  attr = attr->next;
1329  xmlFree(val);
1330  }
1331  if ((period_duration_sec) >= (c->period_duration)) {
1332  period_node = node;
1333  c->period_duration = period_duration_sec;
1334  c->period_start = period_start_sec;
1335  if (c->period_start > 0)
1336  c->media_presentation_duration = c->period_duration;
1337  }
1338  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1339  parse_programinformation(s, node);
1340  }
1341  node = xmlNextElementSibling(node);
1342  }
1343  if (!period_node) {
1344  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1345  ret = AVERROR_INVALIDDATA;
1346  goto cleanup;
1347  }
1348 
1349  adaptionset_node = xmlFirstElementChild(period_node);
1350  while (adaptionset_node) {
1351  if (!av_strcasecmp(adaptionset_node->name, "BaseURL")) {
1352  period_baseurl_node = adaptionset_node;
1353  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentTemplate")) {
1354  period_segmenttemplate_node = adaptionset_node;
1355  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentList")) {
1356  period_segmentlist_node = adaptionset_node;
1357  } else if (!av_strcasecmp(adaptionset_node->name, "AdaptationSet")) {
1358  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1359  }
1360  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1361  }
1362 cleanup:
1363  /*free the document */
1364  xmlFreeDoc(doc);
1365  xmlCleanupParser();
1366  xmlFreeNode(mpd_baseurl_node);
1367  }
1368 
1369  av_bprint_finalize(&buf, NULL);
1370  if (close_in) {
1371  avio_close(in);
1372  }
1373  return ret;
1374 }
1375 
1376 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1377 {
1378  DASHContext *c = s->priv_data;
1379  int64_t num = 0;
1380  int64_t start_time_offset = 0;
1381 
1382  if (c->is_live) {
1383  if (pls->n_fragments) {
1384  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1385  num = pls->first_seq_no;
1386  } else if (pls->n_timelines) {
1387  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1388  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1389  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1390  if (num == -1)
1391  num = pls->first_seq_no;
1392  else
1393  num += pls->first_seq_no;
1394  } else if (pls->fragment_duration){
1395  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1396  if (pls->presentation_timeoffset) {
1397  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) * pls->fragment_timescale)-pls->presentation_timeoffset) / pls->fragment_duration - c->min_buffer_time;
1398  } else if (c->publish_time > 0 && !c->availability_start_time) {
1399  if (c->min_buffer_time) {
1400  num = pls->first_seq_no + (((c->publish_time + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration - c->min_buffer_time;
1401  } else {
1402  num = pls->first_seq_no + (((c->publish_time - c->time_shift_buffer_depth + pls->fragment_duration) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1403  }
1404  } else {
1405  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->suggested_presentation_delay) * pls->fragment_timescale) / pls->fragment_duration;
1406  }
1407  }
1408  } else {
1409  num = pls->first_seq_no;
1410  }
1411  return num;
1412 }
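/*
 * Illustrative live calculation (hypothetical numbers): with
 * availability_start_time 100 s in the past, suggestedPresentationDelay 10 s,
 * fragment_timescale 1000 and fragment_duration 4000 (4 s segments), the
 * default branch gives first_seq_no + ((100 - 10) * 1000) / 4000 =
 * first_seq_no + 22.
 */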
1413 
1414 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1415 {
1416  DASHContext *c = s->priv_data;
1417  int64_t num = 0;
1418 
1419  if (c->is_live && pls->fragment_duration) {
1420  av_log(s, AV_LOG_TRACE, "in live mode\n");
1421  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time) - c->time_shift_buffer_depth) * pls->fragment_timescale) / pls->fragment_duration;
1422  } else {
1423  num = pls->first_seq_no;
1424  }
1425  return num;
1426 }
1427 
1428 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1429 {
1430  int64_t num = 0;
1431 
1432  if (pls->n_fragments) {
1433  num = pls->first_seq_no + pls->n_fragments - 1;
1434  } else if (pls->n_timelines) {
1435  int i = 0;
1436  num = pls->first_seq_no + pls->n_timelines - 1;
1437  for (i = 0; i < pls->n_timelines; i++) {
1438  if (pls->timelines[i]->repeat == -1) {
1439  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1440  num = c->period_duration / length_of_each_segment;
1441  } else {
1442  num += pls->timelines[i]->repeat;
1443  }
1444  }
1445  } else if (c->is_live && pls->fragment_duration) {
1446  num = pls->first_seq_no + (((get_current_time_in_sec() - c->availability_start_time)) * pls->fragment_timescale) / pls->fragment_duration;
1447  } else if (pls->fragment_duration) {
1448  num = pls->first_seq_no + (c->media_presentation_duration * pls->fragment_timescale) / pls->fragment_duration;
1449  }
1450 
1451  return num;
1452 }
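/*
 * Illustrative VOD case (hypothetical numbers): with
 * media_presentation_duration = 120 s, fragment_timescale = 1000 and
 * fragment_duration = 4000, the last branch gives
 * first_seq_no + (120 * 1000) / 4000 = first_seq_no + 30.
 */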
1453 
1454 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1455 {
1456  if (rep_dest && rep_src ) {
1457  free_timelines_list(rep_dest);
1458  rep_dest->timelines = rep_src->timelines;
1459  rep_dest->n_timelines = rep_src->n_timelines;
1460  rep_dest->first_seq_no = rep_src->first_seq_no;
1461  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1462  rep_src->timelines = NULL;
1463  rep_src->n_timelines = 0;
1464  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1465  }
1466 }
1467 
1468 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1469 {
1470  if (rep_dest && rep_src ) {
1471  free_fragment_list(rep_dest);
1472  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1473  rep_dest->cur_seq_no = 0;
1474  else
1475  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1476  rep_dest->fragments = rep_src->fragments;
1477  rep_dest->n_fragments = rep_src->n_fragments;
1478  rep_dest->parent = rep_src->parent;
1479  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1480  rep_src->fragments = NULL;
1481  rep_src->n_fragments = 0;
1482  }
1483 }
1484 
1485 
1486 static int refresh_manifest(AVFormatContext *s)
1487 {
1488  int ret = 0, i;
1489  DASHContext *c = s->priv_data;
1490  // save current context
1491  int n_videos = c->n_videos;
1492  struct representation **videos = c->videos;
1493  int n_audios = c->n_audios;
1494  struct representation **audios = c->audios;
1495  int n_subtitles = c->n_subtitles;
1496  struct representation **subtitles = c->subtitles;
1497  char *base_url = c->base_url;
1498 
1499  c->base_url = NULL;
1500  c->n_videos = 0;
1501  c->videos = NULL;
1502  c->n_audios = 0;
1503  c->audios = NULL;
1504  c->n_subtitles = 0;
1505  c->subtitles = NULL;
1506  ret = parse_manifest(s, s->url, NULL);
1507  if (ret)
1508  goto finish;
1509 
1510  if (c->n_videos != n_videos) {
1511  av_log(c, AV_LOG_ERROR,
1512  "new manifest has mismatched no. of video representations, %d -> %d\n",
1513  n_videos, c->n_videos);
1514  return AVERROR_INVALIDDATA;
1515  }
1516  if (c->n_audios != n_audios) {
1517  av_log(c, AV_LOG_ERROR,
1518  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1519  n_audios, c->n_audios);
1520  return AVERROR_INVALIDDATA;
1521  }
1522  if (c->n_subtitles != n_subtitles) {
1523  av_log(c, AV_LOG_ERROR,
1524  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1525  n_subtitles, c->n_subtitles);
1526  return AVERROR_INVALIDDATA;
1527  }
1528 
1529  for (i = 0; i < n_videos; i++) {
1530  struct representation *cur_video = videos[i];
1531  struct representation *ccur_video = c->videos[i];
1532  if (cur_video->timelines) {
1533  // calc current time
1534  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1535  // update segments
1536  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1537  if (ccur_video->cur_seq_no >= 0) {
1538  move_timelines(ccur_video, cur_video, c);
1539  }
1540  }
1541  if (cur_video->fragments) {
1542  move_segments(ccur_video, cur_video, c);
1543  }
1544  }
1545  for (i = 0; i < n_audios; i++) {
1546  struct representation *cur_audio = audios[i];
1547  struct representation *ccur_audio = c->audios[i];
1548  if (cur_audio->timelines) {
1549  // calc current time
1550  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1551  // update segments
1552  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1553  if (ccur_audio->cur_seq_no >= 0) {
1554  move_timelines(ccur_audio, cur_audio, c);
1555  }
1556  }
1557  if (cur_audio->fragments) {
1558  move_segments(ccur_audio, cur_audio, c);
1559  }
1560  }
1561 
1562 finish:
1563  // restore context
1564  if (c->base_url)
1565  av_free(base_url);
1566  else
1567  c->base_url = base_url;
1568 
1569  if (c->subtitles)
1570  free_subtitles_list(c);
1571  if (c->audios)
1572  free_audio_list(c);
1573  if (c->videos)
1574  free_video_list(c);
1575 
1576  c->n_subtitles = n_subtitles;
1577  c->subtitles = subtitles;
1578  c->n_audios = n_audios;
1579  c->audios = audios;
1580  c->n_videos = n_videos;
1581  c->videos = videos;
1582  return ret;
1583 }
1584 
1585 static struct fragment *get_current_fragment(struct representation *pls)
1586 {
1587  int64_t min_seq_no = 0;
1588  int64_t max_seq_no = 0;
1589  struct fragment *seg = NULL;
1590  struct fragment *seg_ptr = NULL;
1591  DASHContext *c = pls->parent->priv_data;
1592 
1593  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1594  if (pls->cur_seq_no < pls->n_fragments) {
1595  seg_ptr = pls->fragments[pls->cur_seq_no];
1596  seg = av_mallocz(sizeof(struct fragment));
1597  if (!seg) {
1598  return NULL;
1599  }
1600  seg->url = av_strdup(seg_ptr->url);
1601  if (!seg->url) {
1602  av_free(seg);
1603  return NULL;
1604  }
1605  seg->size = seg_ptr->size;
1606  seg->url_offset = seg_ptr->url_offset;
1607  return seg;
1608  } else if (c->is_live) {
1609  refresh_manifest(pls->parent);
1610  } else {
1611  break;
1612  }
1613  }
1614  if (c->is_live) {
1615  min_seq_no = calc_min_seg_no(pls->parent, pls);
1616  max_seq_no = calc_max_seg_no(pls, c);
1617 
1618  if (pls->timelines || pls->fragments) {
1619  refresh_manifest(pls->parent);
1620  }
1621  if (pls->cur_seq_no <= min_seq_no) {
1622  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"]\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no);
1623  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1624  } else if (pls->cur_seq_no > max_seq_no) {
1625  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"]\n", min_seq_no, max_seq_no);
1626  }
1627  seg = av_mallocz(sizeof(struct fragment));
1628  if (!seg) {
1629  return NULL;
1630  }
1631  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1632  seg = av_mallocz(sizeof(struct fragment));
1633  if (!seg) {
1634  return NULL;
1635  }
1636  }
1637  if (seg) {
1638  char *tmpfilename;
1639  if (!pls->url_template) {
1640  av_log(pls->parent, AV_LOG_ERROR, "Cannot get fragment, missing template URL\n");
1641  av_free(seg);
1642  return NULL;
1643  }
1644  tmpfilename = av_mallocz(c->max_url_size);
1645  if (!tmpfilename) {
1646  av_free(seg);
1647  return NULL;
1648  }
1649  ff_dash_fill_tmpl_params(tmpfilename, c->max_url_size, pls->url_template, 0, pls->cur_seq_no, 0, get_segment_start_time_based_on_timeline(pls, pls->cur_seq_no));
1650  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1651  if (!seg->url) {
1652  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', try to use origin template\n", pls->url_template);
1653  seg->url = av_strdup(pls->url_template);
1654  if (!seg->url) {
1655  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1656  av_free(tmpfilename);
1657  av_free(seg);
1658  return NULL;
1659  }
1660  }
1661  av_free(tmpfilename);
1662  seg->size = -1;
1663  }
1664 
1665  return seg;
1666 }
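/*
 * Illustrative template expansion (hypothetical template): with
 * url_template = "chunk-$Number%05d$.m4s" and cur_seq_no = 42,
 * ff_dash_fill_tmpl_params() writes "chunk-00042.m4s" into tmpfilename,
 * which then becomes seg->url.
 */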
1667 
1668 static int read_from_url(struct representation *pls, struct fragment *seg,
1669  uint8_t *buf, int buf_size)
1670 {
1671  int ret;
1672 
1673  /* limit read if the fragment was only a part of a file */
1674  if (seg->size >= 0)
1675  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1676 
1677  ret = avio_read(pls->input, buf, buf_size);
1678  if (ret > 0)
1679  pls->cur_seg_offset += ret;
1680 
1681  return ret;
1682 }
1683 
1684 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1685 {
1686  AVDictionary *opts = NULL;
1687  char *url = NULL;
1688  int ret = 0;
1689 
1690  url = av_mallocz(c->max_url_size);
1691  if (!url) {
1692  ret = AVERROR(ENOMEM);
1693  goto cleanup;
1694  }
1695 
1696  if (seg->size >= 0) {
1697  /* try to restrict the HTTP request to the part we want
1698  * (if this is in fact a HTTP request) */
1699  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1700  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1701  }
1702 
1703  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1704  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64"\n",
1705  url, seg->url_offset);
1706  ret = open_url(pls->parent, &pls->input, url, &c->avio_opts, opts, NULL);
1707 
1708 cleanup:
1709  av_free(url);
1710  av_dict_free(&opts);
1711  pls->cur_seg_offset = 0;
1712  pls->cur_seg_size = seg->size;
1713  return ret;
1714 }
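/*
 * Example (illustrative values): for a fragment with url_offset = 1000 and
 * size = 5000, the "offset" and "end_offset" options are set to 1000 and
 * 6000, so an HTTP-based AVIOContext only requests that byte range.
 */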
1715 
1716 static int update_init_section(struct representation *pls)
1717 {
1718  static const int max_init_section_size = 1024 * 1024;
1719  DASHContext *c = pls->parent->priv_data;
1720  int64_t sec_size;
1721  int64_t urlsize;
1722  int ret;
1723 
1724  if (!pls->init_section || pls->init_sec_buf)
1725  return 0;
1726 
1727  ret = open_input(c, pls, pls->init_section);
1728  if (ret < 0) {
1729  av_log(pls->parent, AV_LOG_WARNING,
1730  "Failed to open an initialization section\n");
1731  return ret;
1732  }
1733 
1734  if (pls->init_section->size >= 0)
1735  sec_size = pls->init_section->size;
1736  else if ((urlsize = avio_size(pls->input)) >= 0)
1737  sec_size = urlsize;
1738  else
1739  sec_size = max_init_section_size;
1740 
1741  av_log(pls->parent, AV_LOG_DEBUG,
1742  "Downloading an initialization section of size %"PRId64"\n",
1743  sec_size);
1744 
1745  sec_size = FFMIN(sec_size, max_init_section_size);
1746 
1747  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1748 
1749  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1750  pls->init_sec_buf_size);
1751  ff_format_io_close(pls->parent, &pls->input);
1752 
1753  if (ret < 0)
1754  return ret;
1755 
1756  pls->init_sec_data_len = ret;
1757  pls->init_sec_buf_read_offset = 0;
1758 
1759  return 0;
1760 }
1761 
1762 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1763 {
1764  struct representation *v = opaque;
1765  if (v->n_fragments && !v->init_sec_data_len) {
1766  return avio_seek(v->input, offset, whence);
1767  }
1768 
1769  return AVERROR(ENOSYS);
1770 }
1771 
1772 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1773 {
1774  int ret = 0;
1775  struct representation *v = opaque;
1776  DASHContext *c = v->parent->priv_data;
1777 
1778 restart:
1779  if (!v->input) {
1780  free_fragment(&v->cur_seg);
1781  v->cur_seg = get_current_fragment(v);
1782  if (!v->cur_seg) {
1783  ret = AVERROR_EOF;
1784  goto end;
1785  }
1786 
1787  /* load/update Media Initialization Section, if any */
1788  ret = update_init_section(v);
1789  if (ret)
1790  goto end;
1791 
1792  ret = open_input(c, v, v->cur_seg);
1793  if (ret < 0) {
1794  if (ff_check_interrupt(c->interrupt_callback)) {
1795  ret = AVERROR_EXIT;
1796  goto end;
1797  }
1798  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist\n");
1799  v->cur_seq_no++;
1800  goto restart;
1801  }
1802  }
1803 
1804  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1805  /* Push init section out first before first actual fragment */
1806  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1807  memcpy(buf, v->init_sec_buf, copy_size);
1808  v->init_sec_buf_read_offset += copy_size;
1809  ret = copy_size;
1810  goto end;
1811  }
1812 
1813  /* if v->cur_seg is NULL, fetch the current fragment and check again */
1814  if (!v->cur_seg) {
1815  v->cur_seg = get_current_fragment(v);
1816  }
1817  if (!v->cur_seg) {
1818  ret = AVERROR_EOF;
1819  goto end;
1820  }
1821  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1822  if (ret > 0)
1823  goto end;
1824 
1825  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1826  if (!v->is_restart_needed)
1827  v->cur_seq_no++;
1828  v->is_restart_needed = 1;
1829  }
1830 
1831 end:
1832  return ret;
1833 }
1834 
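For illustration: read_data() and seek_data() become the callbacks of the per-representation AVIOContext, so the child demuxer sees the init section plus the fragments as one continuous byte stream. Outside libavformat the same wiring is done with the public avio_alloc_context(); the sketch below, separate from dashdec.c, uses a hypothetical in-memory source instead of struct representation.

    #include <string.h>
    #include <libavformat/avformat.h>
    #include <libavutil/mem.h>

    struct mem_src {                 /* hypothetical stand-in for the opaque */
        const uint8_t *data;
        size_t size, pos;
    };

    static int mem_read(void *opaque, uint8_t *buf, int buf_size)
    {
        struct mem_src *s = opaque;
        int n = FFMIN(buf_size, (int)(s->size - s->pos));
        if (n <= 0)
            return AVERROR_EOF;
        memcpy(buf, s->data + s->pos, n);
        s->pos += n;
        return n;
    }

    /* Sketch: open a demuxer on top of the custom read callback.
     * Error-path cleanup is omitted for brevity. */
    static int open_custom_io(AVFormatContext **fmt, struct mem_src *src)
    {
        uint8_t *iobuf = av_malloc(4096);
        AVIOContext *pb = iobuf ? avio_alloc_context(iobuf, 4096, 0, src,
                                                     mem_read, NULL, NULL) : NULL;
        if (!pb)
            return AVERROR(ENOMEM);
        *fmt = avformat_alloc_context();
        if (!*fmt)
            return AVERROR(ENOMEM);
        (*fmt)->pb     = pb;
        (*fmt)->flags |= AVFMT_FLAG_CUSTOM_IO;
        return avformat_open_input(fmt, NULL, NULL, NULL);
    }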
1835 static int save_avio_options(AVFormatContext *s)
1836 {
1837  DASHContext *c = s->priv_data;
1838  const char *opts[] = {
1839  "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", "icy", NULL };
1840  const char **opt = opts;
1841  uint8_t *buf = NULL;
1842  int ret = 0;
1843 
1844  while (*opt) {
1845  if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
1846  if (buf[0] != '\0') {
1847  ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL);
1848  if (ret < 0)
1849  return ret;
1850  } else {
1851  av_freep(&buf);
1852  }
1853  }
1854  opt++;
1855  }
1856 
1857  return ret;
1858 }
1859 
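For illustration: the options saved here (headers, user_agent, cookies, ...) are real options of the http protocol and are re-applied whenever a segment URL is opened. A hedged sketch of forwarding such a saved dictionary, separate from dashdec.c; the user-agent string and helper name are made up.

    #include <libavformat/avio.h>
    #include <libavutil/dict.h>

    /* Sketch: reuse saved HTTP options when opening another resource. */
    static int open_with_saved_opts(AVIOContext **pb, const char *url,
                                    const AVDictionary *saved)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_copy(&opts, saved, 0);
        av_dict_set(&opts, "user_agent", "example-player/1.0", 0);  /* made up */
        ret = avio_open2(pb, url, AVIO_FLAG_READ, NULL, &opts);
        av_dict_free(&opts);   /* entries not consumed by the protocol remain */
        return ret;
    }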
1860 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1861  int flags, AVDictionary **opts)
1862 {
1863  av_log(s, AV_LOG_ERROR,
1864  "A DASH playlist item '%s' referred to an external file '%s'. "
1865  "Opening this file was forbidden for security reasons\n",
1866  s->url, url);
1867  return AVERROR(EPERM);
1868 }
1869 
1870 static void close_demux_for_component(struct representation *pls)
1871 {
1872  /* note: the internal buffer could have changed */
1873  av_freep(&pls->pb.buffer);
1874  memset(&pls->pb, 0x00, sizeof(AVIOContext));
1875  pls->ctx->pb = NULL;
1876  avformat_close_input(&pls->ctx);
1877 }
1878 
1879 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1880 {
1881  DASHContext *c = s->priv_data;
1882  ff_const59 AVInputFormat *in_fmt = NULL;
1883  AVDictionary *in_fmt_opts = NULL;
1884  uint8_t *avio_ctx_buffer = NULL;
1885  int ret = 0, i;
1886 
1887  if (pls->ctx) {
1888  close_demux_for_component(pls);
1889  }
1890 
1891  if (ff_check_interrupt(&s->interrupt_callback)) {
1892  ret = AVERROR_EXIT;
1893  goto fail;
1894  }
1895 
1896  if (!(pls->ctx = avformat_alloc_context())) {
1897  ret = AVERROR(ENOMEM);
1898  goto fail;
1899  }
1900 
1901  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1902  if (!avio_ctx_buffer) {
1903  ret = AVERROR(ENOMEM);
1904  avformat_free_context(pls->ctx);
1905  pls->ctx = NULL;
1906  goto fail;
1907  }
1908  ffio_init_context(&pls->pb, avio_ctx_buffer, INITIAL_BUFFER_SIZE, 0,
1909  pls, read_data, NULL, c->is_live ? NULL : seek_data);
1910  pls->pb.seekable = 0;
1911 
1912  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1913  goto fail;
1914 
1915  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1916  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1917  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1918  pls->ctx->interrupt_callback = s->interrupt_callback;
1919  ret = av_probe_input_buffer(&pls->pb, &in_fmt, "", NULL, 0, 0);
1920  if (ret < 0) {
1921  av_log(s, AV_LOG_ERROR, "Error when loading first fragment of playlist\n");
1922  avformat_free_context(pls->ctx);
1923  pls->ctx = NULL;
1924  goto fail;
1925  }
1926 
1927  pls->ctx->pb = &pls->pb;
1928  pls->ctx->io_open = nested_io_open;
1929 
1930  // provide additional information from mpd if available
1931  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1932  av_dict_free(&in_fmt_opts);
1933  if (ret < 0)
1934  goto fail;
1935  if (pls->n_fragments) {
1936 #if FF_API_R_FRAME_RATE
1937  if (pls->framerate.den) {
1938  for (i = 0; i < pls->ctx->nb_streams; i++)
1939  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1940  }
1941 #endif
1942  ret = avformat_find_stream_info(pls->ctx, NULL);
1943  if (ret < 0)
1944  goto fail;
1945  }
1946 
1947 fail:
1948  return ret;
1949 }
1950 
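For illustration: because the MPD does not say which container each representation uses, reopen_demux_for_component() probes the byte stream with av_probe_input_buffer() before avformat_open_input(). Below is a minimal sketch of that probe-then-open sequence for a context whose pb has already been prepared; it is separate from dashdec.c and the helper name is hypothetical.

    #include <libavformat/avformat.h>

    /* Sketch: detect the container behind an already prepared AVIOContext
     * and open it with the detected input format. */
    static int probe_and_open(AVFormatContext *ctx, AVIOContext *pb)
    {
        AVInputFormat *fmt = NULL;
        int ret = av_probe_input_buffer(pb, &fmt, "", NULL, 0, 0);
        if (ret < 0)
            return ret;
        ctx->pb     = pb;
        ctx->flags |= AVFMT_FLAG_CUSTOM_IO;   /* we own pb, don't avio_close() it */
        return avformat_open_input(&ctx, "", fmt, NULL);
    }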
1951 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1952 {
1953  int ret = 0;
1954  int i;
1955 
1956  pls->parent = s;
1957  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1958 
1959  if (!pls->last_seq_no) {
1960  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1961  }
1962 
1963  ret = reopen_demux_for_component(s, pls);
1964  if (ret < 0) {
1965  goto fail;
1966  }
1967  for (i = 0; i < pls->ctx->nb_streams; i++) {
1968  AVStream *st = avformat_new_stream(s, NULL);
1969  AVStream *ist = pls->ctx->streams[i];
1970  if (!st) {
1971  ret = AVERROR(ENOMEM);
1972  goto fail;
1973  }
1974  st->id = i;
1975  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1976  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1977 
1978  // copy disposition
1979  st->disposition = ist->disposition;
1980 
1981  // copy side data
1982  for (int i = 0; i < ist->nb_side_data; i++) {
1983  const AVPacketSideData *sd_src = &ist->side_data[i];
1984  uint8_t *dst_data;
1985 
1986  dst_data = av_stream_new_side_data(st, sd_src->type, sd_src->size);
1987  if (!dst_data)
1988  return AVERROR(ENOMEM);
1989  memcpy(dst_data, sd_src->data, sd_src->size);
1990  }
1991  }
1992 
1993  return 0;
1994 fail:
1995  return ret;
1996 }
1997 
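For illustration: open_demux_for_component() exposes each child stream by creating a matching AVStream on the parent context and copying its codec parameters, disposition and side data (the time base is propagated with the internal avpriv_set_pts_info()). The sketch below covers only the public part of that mirroring, separate from dashdec.c; the helper name is hypothetical.

    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>

    /* Sketch: add a stream to `parent` that mirrors `ist` from a child demuxer. */
    static int mirror_stream(AVFormatContext *parent, const AVStream *ist)
    {
        AVStream *st = avformat_new_stream(parent, NULL);
        int ret;

        if (!st)
            return AVERROR(ENOMEM);
        ret = avcodec_parameters_copy(st->codecpar, ist->codecpar);
        if (ret < 0)
            return ret;
        st->disposition = ist->disposition;   /* keep default/forced/... flags */
        return 0;
    }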
1998 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1999 {
2000  struct fragment *first_init_section = pls[0]->init_section;
2001  char *url = NULL;
2002  int64_t url_offset = -1;
2003  int64_t size = -1;
2004  int i = 0;
2005 
2006  if (first_init_section == NULL || n_pls == 0)
2007  return 0;
2008 
2009  url = first_init_section->url;
2010  url_offset = first_init_section->url_offset;
2011  size = pls[0]->init_section->size;
2012  for (i = 0; i < n_pls; i++) {
2013  if (!pls[i]->init_section)
2014  continue;
2015 
2016  if (av_strcasecmp(pls[i]->init_section->url, url) ||
2017  pls[i]->init_section->url_offset != url_offset ||
2018  pls[i]->init_section->size != size) {
2019  return 0;
2020  }
2021  }
2022  return 1;
2023 }
2024 
2025 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
2026 {
2027  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
2028  if (!rep_dest->init_sec_buf) {
2029  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
2030  return AVERROR(ENOMEM);
2031  }
2032  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2033  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2034  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2035  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2036 
2037  return 0;
2038 }
2039 
2040 static int dash_close(AVFormatContext *s);
2041 
2042 static void move_metadata(AVStream *st, const char *key, char **value)
2043 {
2044  if (*value) {
2045  av_dict_set(&st->metadata, key, *value, AV_DICT_DONT_STRDUP_VAL);
2046  *value = NULL;
2047  }
2048 }
2049 
2050 static int dash_read_header(AVFormatContext *s)
2051 {
2052  DASHContext *c = s->priv_data;
2053  struct representation *rep;
2054  AVProgram *program;
2055  int ret = 0;
2056  int stream_index = 0;
2057  int i;
2058 
2059  c->interrupt_callback = &s->interrupt_callback;
2060 
2061  if ((ret = save_avio_options(s)) < 0)
2062  goto fail;
2063 
2064  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2065  goto fail;
2066 
2067  /* If this isn't a live stream, fill the total duration of the
2068  * stream. */
2069  if (!c->is_live) {
2070  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2071  } else {
2072  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2073  }
2074 
2075  if (c->n_videos)
2076  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2077 
2078  /* Open the demuxer for video and audio components if available */
2079  for (i = 0; i < c->n_videos; i++) {
2080  rep = c->videos[i];
2081  if (i > 0 && c->is_init_section_common_video) {
2082  ret = copy_init_section(rep, c->videos[0]);
2083  if (ret < 0)
2084  goto fail;
2085  }
2086  ret = open_demux_for_component(s, rep);
2087 
2088  if (ret)
2089  goto fail;
2090  rep->stream_index = stream_index;
2091  ++stream_index;
2092  }
2093 
2094  if (c->n_audios)
2095  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2096 
2097  for (i = 0; i < c->n_audios; i++) {
2098  rep = c->audios[i];
2099  if (i > 0 && c->is_init_section_common_audio) {
2100  ret = copy_init_section(rep, c->audios[0]);
2101  if (ret < 0)
2102  goto fail;
2103  }
2104  ret = open_demux_for_component(s, rep);
2105 
2106  if (ret)
2107  goto fail;
2108  rep->stream_index = stream_index;
2109  ++stream_index;
2110  }
2111 
2112  if (c->n_subtitles)
2113  c->is_init_section_common_subtitle = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2114 
2115  for (i = 0; i < c->n_subtitles; i++) {
2116  rep = c->subtitles[i];
2117  if (i > 0 && c->is_init_section_common_subtitle) {
2118  ret = copy_init_section(rep, c->subtitles[0]);
2119  if (ret < 0)
2120  goto fail;
2121  }
2122  ret = open_demux_for_component(s, rep);
2123 
2124  if (ret)
2125  goto fail;
2126  rep->stream_index = stream_index;
2127  ++stream_index;
2128  }
2129 
2130  if (!stream_index) {
2131  ret = AVERROR_INVALIDDATA;
2132  goto fail;
2133  }
2134 
2135  /* Create a program */
2136  program = av_new_program(s, 0);
2137  if (!program) {
2138  ret = AVERROR(ENOMEM);
2139  goto fail;
2140  }
2141 
2142  for (i = 0; i < c->n_videos; i++) {
2143  rep = c->videos[i];
2144  av_program_add_stream_index(s, 0, rep->stream_index);
2145  rep->assoc_stream = s->streams[rep->stream_index];
2146  if (rep->bandwidth > 0)
2147  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2148  move_metadata(rep->assoc_stream, "id", &rep->id);
2149  }
2150  for (i = 0; i < c->n_audios; i++) {
2151  rep = c->audios[i];
2152  av_program_add_stream_index(s, 0, rep->stream_index);
2153  rep->assoc_stream = s->streams[rep->stream_index];
2154  if (rep->bandwidth > 0)
2155  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2156  move_metadata(rep->assoc_stream, "id", &rep->id);
2157  move_metadata(rep->assoc_stream, "language", &rep->lang);
2158  }
2159  for (i = 0; i < c->n_subtitles; i++) {
2160  rep = c->subtitles[i];
2161  av_program_add_stream_index(s, 0, rep->stream_index);
2162  rep->assoc_stream = s->streams[rep->stream_index];
2163  move_metadata(rep->assoc_stream, "id", &rep->id);
2164  move_metadata(rep->assoc_stream, "language", &rep->lang);
2165  }
2166 
2167  return 0;
2168 fail:
2169  dash_close(s);
2170  return ret;
2171 }
2172 
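For illustration: from the library user's point of view, dash_read_header() runs inside avformat_open_input() once the input probes as DASH. A hedged sketch of opening an MPD and listing the exposed streams, separate from dashdec.c; the URL is hypothetical.

    #include <stdio.h>
    #include <libavformat/avformat.h>

    int main(void)
    {
        AVFormatContext *fmt = NULL;
        const char *url = "https://example.com/stream.mpd";  /* hypothetical */
        unsigned i;

        if (avformat_open_input(&fmt, url, NULL, NULL) < 0)
            return 1;
        if (avformat_find_stream_info(fmt, NULL) < 0) {
            avformat_close_input(&fmt);
            return 1;
        }
        for (i = 0; i < fmt->nb_streams; i++)
            printf("stream %u: codec_id %d\n", i,
                   (int)fmt->streams[i]->codecpar->codec_id);
        avformat_close_input(&fmt);
        return 0;
    }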
2173 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2174 {
2175  int i, j;
2176 
2177  for (i = 0; i < n; i++) {
2178  struct representation *pls = p[i];
2179  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2180 
2181  if (needed && !pls->ctx) {
2182  pls->cur_seg_offset = 0;
2183  pls->init_sec_buf_read_offset = 0;
2184  /* Catch up */
2185  for (j = 0; j < n; j++) {
2186  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2187  }
2188  reopen_demux_for_component(s, pls);
2189  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2190  } else if (!needed && pls->ctx) {
2191  close_demux_for_component(pls);
2192  ff_format_io_close(pls->parent, &pls->input);
2193  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2194  }
2195  }
2196 }
2197 
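For illustration: recheck_discard_flags() lets the caller enable or disable whole representations by toggling AVStream.discard on the parent streams. A minimal sketch of deselecting every stream except one, separate from dashdec.c; the chosen index and helper name are arbitrary.

    #include <libavcodec/avcodec.h>   /* AVDISCARD_* */
    #include <libavformat/avformat.h>

    /* Sketch: keep only stream `keep`; the DASH demuxer then stops
     * downloading the representations backing the discarded streams. */
    static void select_one_stream(AVFormatContext *fmt, unsigned keep)
    {
        for (unsigned i = 0; i < fmt->nb_streams; i++)
            fmt->streams[i]->discard = (i == keep) ? AVDISCARD_DEFAULT
                                                   : AVDISCARD_ALL;
    }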
2198 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2199 {
2200  DASHContext *c = s->priv_data;
2201  int ret = 0, i;
2202  int64_t mints = 0;
2203  struct representation *cur = NULL;
2204  struct representation *rep = NULL;
2205 
2206  recheck_discard_flags(s, c->videos, c->n_videos);
2207  recheck_discard_flags(s, c->audios, c->n_audios);
2208  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2209 
2210  for (i = 0; i < c->n_videos; i++) {
2211  rep = c->videos[i];
2212  if (!rep->ctx)
2213  continue;
2214  if (!cur || rep->cur_timestamp < mints) {
2215  cur = rep;
2216  mints = rep->cur_timestamp;
2217  }
2218  }
2219  for (i = 0; i < c->n_audios; i++) {
2220  rep = c->audios[i];
2221  if (!rep->ctx)
2222  continue;
2223  if (!cur || rep->cur_timestamp < mints) {
2224  cur = rep;
2225  mints = rep->cur_timestamp;
2226  }
2227  }
2228 
2229  for (i = 0; i < c->n_subtitles; i++) {
2230  rep = c->subtitles[i];
2231  if (!rep->ctx)
2232  continue;
2233  if (!cur || rep->cur_timestamp < mints) {
2234  cur = rep;
2235  mints = rep->cur_timestamp;
2236  }
2237  }
2238 
2239  if (!cur) {
2240  return AVERROR_INVALIDDATA;
2241  }
2242  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2243  ret = av_read_frame(cur->ctx, pkt);
2244  if (ret >= 0) {
2245  /* If we got a packet, return it */
2246  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2247  pkt->stream_index = cur->stream_index;
2248  return 0;
2249  }
2250  if (cur->is_restart_needed) {
2251  cur->cur_seg_offset = 0;
2252  cur->init_sec_buf_read_offset = 0;
2253  ff_format_io_close(cur->parent, &cur->input);
2254  ret = reopen_demux_for_component(s, cur);
2255  cur->is_restart_needed = 0;
2256  }
2257  }
2258  return AVERROR_EOF;
2259 }
2260 
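For illustration: dash_read_packet() keeps cur_timestamp on a common 90 kHz clock so timestamps from representations with different time bases can be compared when picking the next stream to read. A small sketch of that conversion, separate from dashdec.c, with a made-up example value.

    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    /* Sketch: convert a pts expressed in time base `tb` into 90 kHz ticks,
     * as the cur_timestamp bookkeeping above does. */
    static int64_t to_90khz(int64_t pts, AVRational tb)
    {
        return av_rescale(pts, (int64_t)tb.num * 90000, tb.den);
    }

    /* e.g. pts = 48000 in a 1/48000 time base -> 90000 ticks (one second). */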
2261 static int dash_close(AVFormatContext *s)
2262 {
2263  DASHContext *c = s->priv_data;
2264  free_audio_list(c);
2265  free_video_list(c);
2266  free_subtitle_list(c);
2267  av_dict_free(&c->avio_opts);
2268  av_freep(&c->base_url);
2269  return 0;
2270 }
2271 
2272 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2273 {
2274  int ret = 0;
2275  int i = 0;
2276  int j = 0;
2277  int64_t duration = 0;
2278 
2279  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms] %s\n",
2280  seek_pos_msec, dry_run ? " (dry)" : "");
2281 
2282  // single fragment mode
2283  if (pls->n_fragments == 1) {
2284  pls->cur_timestamp = 0;
2285  pls->cur_seg_offset = 0;
2286  if (dry_run)
2287  return 0;
2288  ff_read_frame_flush(pls->ctx);
2289  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2290  }
2291 
2292  ff_format_io_close(pls->parent, &pls->input);
2293 
2294  // find the nearest fragment
2295  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2296  int64_t num = pls->first_seq_no;
2297  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2298  "last_seq_no[%"PRId64"].\n",
2299  (int)pls->n_timelines, (int64_t)pls->last_seq_no);
2300  for (i = 0; i < pls->n_timelines; i++) {
2301  if (pls->timelines[i]->starttime > 0) {
2302  duration = pls->timelines[i]->starttime;
2303  }
2304  duration += pls->timelines[i]->duration;
2305  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2306  goto set_seq_num;
2307  }
2308  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2309  duration += pls->timelines[i]->duration;
2310  num++;
2311  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2312  goto set_seq_num;
2313  }
2314  }
2315  num++;
2316  }
2317 
2318 set_seq_num:
2319  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2320  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"].\n",
2321  (int64_t)pls->cur_seq_no);
2322  } else if (pls->fragment_duration > 0) {
2323  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2324  } else {
2325  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2326  pls->cur_seq_no = pls->first_seq_no;
2327  }
2328  pls->cur_timestamp = 0;
2329  pls->cur_seg_offset = 0;
2330  pls->init_sec_buf_read_offset = 0;
2331  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2332 
2333  return ret;
2334 }
2335 
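For illustration: in the fragment_duration branch above, the target segment is first_seq_no plus the seek position divided by one segment's duration, both expressed in @timescale units. A sketch of that arithmetic, separate from dashdec.c, with hypothetical MPD values.

    #include <stdint.h>

    /* Sketch: map a seek position in milliseconds to a segment number for a
     * representation described by @timescale and a fixed @duration. */
    static int64_t seg_no_for_pos(int64_t first_seq_no, int64_t pos_msec,
                                  int64_t timescale, int64_t duration)
    {
        return first_seq_no + ((pos_msec * timescale) / duration) / 1000;
    }

    /* e.g. timescale 90000, duration 360000 (4 s segments), pos 10000 ms
     * -> first_seq_no + 2, i.e. the segment covering [8 s, 12 s). */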
2336 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2337 {
2338  int ret = 0, i;
2339  DASHContext *c = s->priv_data;
2340  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2341  s->streams[stream_index]->time_base.den,
2342  flags & AVSEEK_FLAG_BACKWARD ?
2343  AV_ROUND_DOWN : AV_ROUND_UP);
2344  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2345  return AVERROR(ENOSYS);
2346 
2347  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2348  for (i = 0; i < c->n_videos; i++) {
2349  if (!ret)
2350  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2351  }
2352  for (i = 0; i < c->n_audios; i++) {
2353  if (!ret)
2354  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2355  }
2356  for (i = 0; i < c->n_subtitles; i++) {
2357  if (!ret)
2358  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2359  }
2360 
2361  return ret;
2362 }
2363 
2364 static int dash_probe(const AVProbeData *p)
2365 {
2366  if (!av_stristr(p->buf, "<MPD"))
2367  return 0;
2368 
2369  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2370  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2371  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2372  av_stristr(p->buf, "dash:profile:isoff-main:2011") ||
2373  av_stristr(p->buf, "3GPP:PSS:profile:DASH1")) {
2374  return AVPROBE_SCORE_MAX;
2375  }
2376  if (av_stristr(p->buf, "dash:profile")) {
2377  return AVPROBE_SCORE_MAX;
2378  }
2379 
2380  return 0;
2381 }
2382 
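For illustration: dash_probe() only looks for the <MPD element and a DASH profile URN in the probe buffer. A self-contained sketch of the same string heuristic applied to an in-memory snippet, separate from dashdec.c; the XML below is made up.

    #include <stdio.h>
    #include <libavutil/avstring.h>

    int main(void)
    {
        static const char manifest[] =
            "<?xml version=\"1.0\"?>\n"
            "<MPD xmlns=\"urn:mpeg:dash:schema:mpd:2011\" "
            "profiles=\"urn:mpeg:dash:profile:isoff-live:2011\"/>\n";

        if (av_stristr(manifest, "<MPD") && av_stristr(manifest, "dash:profile"))
            printf("looks like a DASH MPD\n");
        return 0;
    }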
2383 #define OFFSET(x) offsetof(DASHContext, x)
2384 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2385 static const AVOption dash_options[] = {
2386  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2387  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2388  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm,ts"},
2389  INT_MIN, INT_MAX, FLAGS},
2390  {NULL}
2391 };
2392 
2393 static const AVClass dash_class = {
2394  .class_name = "dash",
2395  .item_name = av_default_item_name,
2396  .option = dash_options,
2397  .version = LIBAVUTIL_VERSION_INT,
2398 };
2399 
2400 AVInputFormat ff_dash_demuxer = {
2401  .name = "dash",
2402  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2403  .priv_class = &dash_class,
2404  .priv_data_size = sizeof(DASHContext),
2405  .read_probe = dash_probe,
2406  .read_header = dash_read_header,
2407  .read_packet = dash_read_packet,
2408  .read_close = dash_close,
2409  .read_seek = dash_read_seek,
2410  .flags = AVFMT_NO_BYTE_SEEK,
2411 };
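For illustration: allowed_extensions is a demuxer-private option, so it can be passed through the options dictionary of avformat_open_input(). A minimal sketch, separate from dashdec.c; the extension list and helper name are illustrative only.

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    /* Sketch: tighten the extension whitelist used for segment URLs. */
    static int open_mpd_restricted(AVFormatContext **fmt, const char *url)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "allowed_extensions", "mp4,m4s", 0);
        ret = avformat_open_input(fmt, url, NULL, &opts);
        av_dict_free(&opts);   /* unrecognized entries are left in the dict */
        return ret;
    }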