dashdec.c
1 /*
2  * Dynamic Adaptive Streaming over HTTP demux
3  * Copyright (c) 2017 samsamsam@o2.pl based on HLS demux
4  * Copyright (c) 2017 Steven Liu
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 #include <libxml/parser.h>
23 #include "libavutil/intreadwrite.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/time.h"
26 #include "libavutil/parseutils.h"
27 #include "internal.h"
28 #include "avio_internal.h"
29 #include "dash.h"
30 
31 #define INITIAL_BUFFER_SIZE 32768
32 #define MAX_BPRINT_READ_SIZE (UINT_MAX - 1)
33 #define DEFAULT_MANIFEST_SIZE 8 * 1024
34 
35 struct fragment {
36  int64_t url_offset;
37  int64_t size;
38  char *url;
39 };
40 
41 /*
42  * Reference: ISO_IEC_23009-1-DASH-2012
43  * Section: 5.3.9.6.2
44  * Table 17 — Semantics of SegmentTimeline element
45  */
46 struct timeline {
47  /* starttime: Element or Attribute Name
48  * specifies the MPD start time, in @timescale units, at which
49  * the first Segment in the series starts relative to the beginning of the Period.
50  * The value of this attribute must be equal to or greater than the sum of the previous S
51  * element's earliest presentation time and the sum of the contiguous Segment durations.
52  * If the value of the attribute is greater than what is expressed by the previous S element,
53  * it expresses discontinuities in the timeline.
54  * If not present, the value shall be assumed to be zero for the first S element;
55  * for subsequent S elements, the value shall be assumed to be the sum of
56  * the previous S element's earliest presentation time and contiguous duration
57  * (i.e. previous S@starttime + @duration * (@repeat + 1)).
58  */
59  int64_t starttime;
60  /* repeat: Element or Attribute Name
61  * specifies the repeat count of the number of following contiguous Segments with
62  * the same duration expressed by the value of @duration. This value is zero-based
63  * (e.g. a value of three means four Segments in the contiguous series).
64  * */
65  int64_t repeat;
66  /* duration: Element or Attribute Name
67  * specifies the Segment duration, in units of the value of the @timescale.
68  * */
69  int64_t duration;
70 };
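/* Worked example of the semantics above (illustrative values, not from the spec text):
 * with @timescale = 1000, a SegmentTimeline of
 *     <S t="0" d="5000" r="2"/> <S d="4000"/>
 * describes four segments covering [0,5000), [5000,10000), [10000,15000) and
 * [15000,19000) in timescale units, i.e. 5 s + 5 s + 5 s + 4 s of media;
 * the second S element starts at 0 + 5000 * (2 + 1) = 15000 because @t is absent. */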
71 
72 /*
73  * Each playlist has its own demuxer. If it is currently active,
74  * it has an opened AVIOContext too, and potentially an AVPacket
75  * containing the next packet from this stream.
76  */
77 struct representation {
78  char *url_template;
79  AVIOContext pb;
80  AVIOContext *input;
81  AVFormatContext *parent;
82  AVFormatContext *ctx;
84 
85  char id[20];
86  char *lang;
87  int bandwidth;
88  AVRational framerate;
89  AVStream *assoc_stream; /* demuxer stream associated with this representation */
90 
91  int n_fragments;
92  struct fragment **fragments; /* VOD list of fragment for profile */
93 
94  int n_timelines;
95  struct timeline **timelines;
96 
97  int64_t first_seq_no;
98  int64_t last_seq_no;
99  int64_t start_number; /* used with a dynamic segment list to know which segments are new */
100 
101  int64_t fragment_duration;
102  int64_t fragment_timescale;
103 
104  int64_t presentation_timeoffset;
105 
106  int64_t cur_seq_no;
107  int64_t cur_seg_offset;
108  int64_t cur_seg_size;
109  struct fragment *cur_seg;
110 
111  /* Currently active Media Initialization Section */
112  struct fragment *init_section;
113  uint8_t *init_sec_buf;
114  uint32_t init_sec_buf_size;
115  uint32_t init_sec_data_len;
116  uint32_t init_sec_buf_read_offset;
117  int64_t cur_timestamp;
118  int is_restart_needed;
119 };
120 
121 typedef struct DASHContext {
122  const AVClass *class;
123  char *base_url;
124 
125  int n_videos;
126  struct representation **videos;
127  int n_audios;
128  struct representation **audios;
129  int n_subtitles;
130  struct representation **subtitles;
131 
132  /* MediaPresentationDescription Attribute */
133  uint64_t media_presentation_duration;
134  uint64_t suggested_presentation_delay;
135  uint64_t availability_start_time;
136  uint64_t availability_end_time;
137  uint64_t publish_time;
138  uint64_t minimum_update_period;
139  uint64_t time_shift_buffer_depth;
140  uint64_t min_buffer_time;
141 
142  /* Period Attribute */
143  uint64_t period_duration;
144  uint64_t period_start;
145 
146  /* AdaptationSet Attribute */
147  char *adaptionset_lang;
148 
149  int is_live;
150  AVIOInterruptCB *interrupt_callback;
151  char *allowed_extensions;
152  AVDictionary *avio_opts;
153  int max_url_size;
154 
155  /* Flags for init section*/
158 
159 } DASHContext;
160 
161 static int ishttp(char *url)
162 {
163  const char *proto_name = avio_find_protocol_name(url);
164  return av_strstart(proto_name, "http", NULL);
165 }
166 
167 static int aligned(int val)
168 {
169  return ((val + 0x3F) >> 6) << 6;
170 }
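/* aligned() rounds val up to the next multiple of 64 (0x40), e.g. aligned(100) = 128
 * and aligned(64) = 64; it is used below to pad URL buffer size estimates so that
 * repeated template substitutions do not have to grow the buffer for every small change. */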
171 
172 static uint64_t get_current_time_in_sec(void)
173 {
174  return av_gettime() / 1000000;
175 }
176 
177 static uint64_t get_utc_date_time_insec(AVFormatContext *s, const char *datetime)
178 {
179  struct tm timeinfo;
180  int year = 0;
181  int month = 0;
182  int day = 0;
183  int hour = 0;
184  int minute = 0;
185  int ret = 0;
186  float second = 0.0;
187 
188  /* ISO-8601 date parser */
189  if (!datetime)
190  return 0;
191 
192  ret = sscanf(datetime, "%d-%d-%dT%d:%d:%fZ", &year, &month, &day, &hour, &minute, &second);
193  /* year, month, day, hour, minute, second 6 arguments */
194  if (ret != 6) {
195  av_log(s, AV_LOG_WARNING, "get_utc_date_time_insec get a wrong time format\n");
196  }
197  timeinfo.tm_year = year - 1900;
198  timeinfo.tm_mon = month - 1;
199  timeinfo.tm_mday = day;
200  timeinfo.tm_hour = hour;
201  timeinfo.tm_min = minute;
202  timeinfo.tm_sec = (int)second;
203 
204  return av_timegm(&timeinfo);
205 }
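/* Example: for an MPD attribute such as availabilityStartTime="2019-03-05T10:30:05Z",
 * the sscanf() above yields year=2019, month=3, day=5, hour=10, minute=30, second=5.0,
 * and av_timegm() converts that broken-down UTC time into seconds since the Unix epoch;
 * fractional seconds are truncated by the (int)second cast. */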
206 
207 static uint32_t get_duration_insec(AVFormatContext *s, const char *duration)
208 {
209  /* ISO-8601 duration parser */
210  uint32_t days = 0;
211  uint32_t hours = 0;
212  uint32_t mins = 0;
213  uint32_t secs = 0;
214  int size = 0;
215  float value = 0;
216  char type = '\0';
217  const char *ptr = duration;
218 
219  while (*ptr) {
220  if (*ptr == 'P' || *ptr == 'T') {
221  ptr++;
222  continue;
223  }
224 
225  if (sscanf(ptr, "%f%c%n", &value, &type, &size) != 2) {
226  av_log(s, AV_LOG_WARNING, "get_duration_insec get a wrong time format\n");
227  return 0; /* parser error */
228  }
229  switch (type) {
230  case 'D':
231  days = (uint32_t)value;
232  break;
233  case 'H':
234  hours = (uint32_t)value;
235  break;
236  case 'M':
237  mins = (uint32_t)value;
238  break;
239  case 'S':
240  secs = (uint32_t)value;
241  break;
242  default:
243  // handle invalid type
244  break;
245  }
246  ptr += size;
247  }
248  return ((days * 24 + hours) * 60 + mins) * 60 + secs;
249 }
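/* Example: the ISO-8601 duration "PT1H30M5S" parses to hours=1, mins=30, secs=5,
 * so the function returns ((0 * 24 + 1) * 60 + 30) * 60 + 5 = 5405 seconds.
 * Fractional components are truncated by the (uint32_t) casts, so "PT0.5S" yields 0. */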
250 
251 static int64_t get_segment_start_time_based_on_timeline(struct representation *pls, int64_t cur_seq_no)
252 {
253  int64_t start_time = 0;
254  int64_t i = 0;
255  int64_t j = 0;
256  int64_t num = 0;
257 
258  if (pls->n_timelines) {
259  for (i = 0; i < pls->n_timelines; i++) {
260  if (pls->timelines[i]->starttime > 0) {
261  start_time = pls->timelines[i]->starttime;
262  }
263  if (num == cur_seq_no)
264  goto finish;
265 
266  start_time += pls->timelines[i]->duration;
267 
268  if (pls->timelines[i]->repeat == -1) {
269  start_time = pls->timelines[i]->duration * cur_seq_no;
270  goto finish;
271  }
272 
273  for (j = 0; j < pls->timelines[i]->repeat; j++) {
274  num++;
275  if (num == cur_seq_no)
276  goto finish;
277  start_time += pls->timelines[i]->duration;
278  }
279  num++;
280  }
281  }
282 finish:
283  return start_time;
284 }
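/* Example: for the timeline <S t="0" d="5000" r="2"/> <S d="4000"/> shown earlier,
 * cur_seq_no = 3 (relative to the first segment) walks past the three repeated
 * 5000-unit segments and returns a start time of 15000 timescale units. */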
285 
286 static int64_t calc_next_seg_no_from_timelines(struct representation *pls, int64_t cur_time)
287 {
288  int64_t i = 0;
289  int64_t j = 0;
290  int64_t num = 0;
291  int64_t start_time = 0;
292 
293  for (i = 0; i < pls->n_timelines; i++) {
294  if (pls->timelines[i]->starttime > 0) {
295  start_time = pls->timelines[i]->starttime;
296  }
297  if (start_time > cur_time)
298  goto finish;
299 
300  start_time += pls->timelines[i]->duration;
301  for (j = 0; j < pls->timelines[i]->repeat; j++) {
302  num++;
303  if (start_time > cur_time)
304  goto finish;
305  start_time += pls->timelines[i]->duration;
306  }
307  num++;
308  }
309 
310  return -1;
311 
312 finish:
313  return num;
314 }
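/* This is roughly the inverse of the function above: for the same example timeline,
 * cur_time = 12000 returns 3, the index of the first segment whose start time (15000)
 * lies beyond cur_time; if cur_time is at or past every start time it returns -1. */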
315 
316 static void free_fragment(struct fragment **seg)
317 {
318  if (!(*seg)) {
319  return;
320  }
321  av_freep(&(*seg)->url);
322  av_freep(seg);
323 }
324 
325 static void free_fragment_list(struct representation *pls)
326 {
327  int i;
328 
329  for (i = 0; i < pls->n_fragments; i++) {
330  free_fragment(&pls->fragments[i]);
331  }
332  av_freep(&pls->fragments);
333  pls->n_fragments = 0;
334 }
335 
336 static void free_timelines_list(struct representation *pls)
337 {
338  int i;
339 
340  for (i = 0; i < pls->n_timelines; i++) {
341  av_freep(&pls->timelines[i]);
342  }
343  av_freep(&pls->timelines);
344  pls->n_timelines = 0;
345 }
346 
347 static void free_representation(struct representation *pls)
348 {
349  free_fragment_list(pls);
350  free_timelines_list(pls);
351  free_fragment(&pls->cur_seg);
353  av_freep(&pls->init_sec_buf);
354  av_freep(&pls->pb.buffer);
355  ff_format_io_close(pls->parent, &pls->input);
356  if (pls->ctx) {
357  pls->ctx->pb = NULL;
358  avformat_close_input(&pls->ctx);
359  }
360 
361  av_freep(&pls->url_template);
362  av_freep(&pls->lang);
363  av_freep(&pls);
364 }
365 
366 static void free_video_list(DASHContext *c)
367 {
368  int i;
369  for (i = 0; i < c->n_videos; i++) {
370  struct representation *pls = c->videos[i];
371  free_representation(pls);
372  }
373  av_freep(&c->videos);
374  c->n_videos = 0;
375 }
376 
377 static void free_audio_list(DASHContext *c)
378 {
379  int i;
380  for (i = 0; i < c->n_audios; i++) {
381  struct representation *pls = c->audios[i];
382  free_representation(pls);
383  }
384  av_freep(&c->audios);
385  c->n_audios = 0;
386 }
387 
388 static void free_subtitle_list(DASHContext *c)
389 {
390  int i;
391  for (i = 0; i < c->n_subtitles; i++) {
392  struct representation *pls = c->subtitles[i];
393  free_representation(pls);
394  }
395  av_freep(&c->subtitles);
396  c->n_subtitles = 0;
397 }
398 
399 static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
400  AVDictionary **opts, AVDictionary *opts2, int *is_http)
401 {
402  DASHContext *c = s->priv_data;
403  AVDictionary *tmp = NULL;
404  const char *proto_name = NULL;
405  int ret;
406 
407  if (av_strstart(url, "crypto", NULL)) {
408  if (url[6] == '+' || url[6] == ':')
409  proto_name = avio_find_protocol_name(url + 7);
410  }
411 
412  if (!proto_name)
413  proto_name = avio_find_protocol_name(url);
414 
415  if (!proto_name)
416  return AVERROR_INVALIDDATA;
417 
418  // only http(s) & file are allowed
419  if (av_strstart(proto_name, "file", NULL)) {
420  if (strcmp(c->allowed_extensions, "ALL") && !av_match_ext(url, c->allowed_extensions)) {
421  av_log(s, AV_LOG_ERROR,
422  "Filename extension of \'%s\' is not a common multimedia extension, blocked for security reasons.\n"
423  "If you wish to override this adjust allowed_extensions, you can set it to \'ALL\' to allow all\n",
424  url);
425  return AVERROR_INVALIDDATA;
426  }
427  } else if (av_strstart(proto_name, "http", NULL)) {
428  ;
429  } else
430  return AVERROR_INVALIDDATA;
431 
432  if (!strncmp(proto_name, url, strlen(proto_name)) && url[strlen(proto_name)] == ':')
433  ;
434  else if (av_strstart(url, "crypto", NULL) && !strncmp(proto_name, url + 7, strlen(proto_name)) && url[7 + strlen(proto_name)] == ':')
435  ;
436  else if (strcmp(proto_name, "file") || !strncmp(url, "file,", 5))
437  return AVERROR_INVALIDDATA;
438 
439  av_freep(pb);
440  av_dict_copy(&tmp, *opts, 0);
441  av_dict_copy(&tmp, opts2, 0);
442  ret = avio_open2(pb, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);
443  if (ret >= 0) {
444  // update cookies on http response with setcookies.
445  char *new_cookies = NULL;
446 
447  if (!(s->flags & AVFMT_FLAG_CUSTOM_IO))
448  av_opt_get(*pb, "cookies", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&new_cookies);
449 
450  if (new_cookies) {
451  av_dict_set(opts, "cookies", new_cookies, AV_DICT_DONT_STRDUP_VAL);
452  }
453 
454  }
455 
456  av_dict_free(&tmp);
457 
458  if (is_http)
459  *is_http = av_strstart(proto_name, "http", NULL);
460 
461  return ret;
462 }
463 
464 static char *get_content_url(xmlNodePtr *baseurl_nodes,
465  int n_baseurl_nodes,
466  int max_url_size,
467  char *rep_id_val,
468  char *rep_bandwidth_val,
469  char *val)
470 {
471  int i;
472  char *text;
473  char *url = NULL;
474  char *tmp_str = av_mallocz(max_url_size);
475 
476  if (!tmp_str)
477  return NULL;
478 
479  for (i = 0; i < n_baseurl_nodes; ++i) {
480  if (baseurl_nodes[i] &&
481  baseurl_nodes[i]->children &&
482  baseurl_nodes[i]->children->type == XML_TEXT_NODE) {
483  text = xmlNodeGetContent(baseurl_nodes[i]->children);
484  if (text) {
485  memset(tmp_str, 0, max_url_size);
486  ff_make_absolute_url(tmp_str, max_url_size, "", text);
487  xmlFree(text);
488  }
489  }
490  }
491 
492  if (val)
493  ff_make_absolute_url(tmp_str, max_url_size, tmp_str, val);
494 
495  if (rep_id_val) {
496  url = av_strireplace(tmp_str, "$RepresentationID$", rep_id_val);
497  if (!url) {
498  goto end;
499  }
500  av_strlcpy(tmp_str, url, max_url_size);
501  }
502  if (rep_bandwidth_val && tmp_str[0] != '\0') {
503  // free any previously assigned url before reassigning
504  av_free(url);
505  url = av_strireplace(tmp_str, "$Bandwidth$", rep_bandwidth_val);
506  if (!url) {
507  goto end;
508  }
509  }
510 end:
511  av_free(tmp_str);
512  return url;
513 }
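/* Example of the template substitution performed here (URLs are hypothetical):
 * with a BaseURL of "http://example.com/dash/", rep_id_val = "video_1",
 * rep_bandwidth_val = "2000000" and val = "$RepresentationID$_$Bandwidth$/init.mp4",
 * the returned URL is "http://example.com/dash/video_1_2000000/init.mp4". */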
514 
515 static char *get_val_from_nodes_tab(xmlNodePtr *nodes, const int n_nodes, const char *attrname)
516 {
517  int i;
518  char *val;
519 
520  for (i = 0; i < n_nodes; ++i) {
521  if (nodes[i]) {
522  val = xmlGetProp(nodes[i], attrname);
523  if (val)
524  return val;
525  }
526  }
527 
528  return NULL;
529 }
530 
531 static xmlNodePtr find_child_node_by_name(xmlNodePtr rootnode, const char *nodename)
532 {
533  xmlNodePtr node = rootnode;
534  if (!node) {
535  return NULL;
536  }
537 
538  node = xmlFirstElementChild(node);
539  while (node) {
540  if (!av_strcasecmp(node->name, nodename)) {
541  return node;
542  }
543  node = xmlNextElementSibling(node);
544  }
545  return NULL;
546 }
547 
548 static enum AVMediaType get_content_type(xmlNodePtr node)
549 {
550  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
551  int i = 0;
552  const char *attr;
553  char *val = NULL;
554 
555  if (node) {
556  for (i = 0; i < 2; i++) {
557  attr = i ? "mimeType" : "contentType";
558  val = xmlGetProp(node, attr);
559  if (val) {
560  if (av_stristr(val, "video")) {
561  type = AVMEDIA_TYPE_VIDEO;
562  } else if (av_stristr(val, "audio")) {
563  type = AVMEDIA_TYPE_AUDIO;
564  } else if (av_stristr(val, "text")) {
565  type = AVMEDIA_TYPE_SUBTITLE;
566  }
567  xmlFree(val);
568  }
569  }
570  }
571  return type;
572 }
573 
574 static struct fragment * get_Fragment(char *range)
575 {
576  struct fragment * seg = av_mallocz(sizeof(struct fragment));
577 
578  if (!seg)
579  return NULL;
580 
581  seg->size = -1;
582  if (range) {
583  char *str_end_offset;
584  char *str_offset = av_strtok(range, "-", &str_end_offset);
585  seg->url_offset = strtoll(str_offset, NULL, 10);
586  seg->size = strtoll(str_end_offset, NULL, 10) - seg->url_offset + 1;
587  }
588 
589  return seg;
590 }
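/* Example: an MPD byte range such as mediaRange="100-199" produces a fragment with
 * url_offset = 100 and size = 199 - 100 + 1 = 100 bytes (the range is inclusive);
 * with no range the size stays -1, meaning "until end of file". */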
591 
592 static int parse_manifest_segmenturlnode(AVFormatContext *s, struct representation *rep,
593  xmlNodePtr fragmenturl_node,
594  xmlNodePtr *baseurl_nodes,
595  char *rep_id_val,
596  char *rep_bandwidth_val)
597 {
598  DASHContext *c = s->priv_data;
599  char *initialization_val = NULL;
600  char *media_val = NULL;
601  char *range_val = NULL;
602  int max_url_size = c ? c->max_url_size: MAX_URL_SIZE;
603  int err;
604 
605  if (!av_strcasecmp(fragmenturl_node->name, "Initialization")) {
606  initialization_val = xmlGetProp(fragmenturl_node, "sourceURL");
607  range_val = xmlGetProp(fragmenturl_node, "range");
608  if (initialization_val || range_val) {
610  rep->init_section = get_Fragment(range_val);
611  xmlFree(range_val);
612  if (!rep->init_section) {
613  xmlFree(initialization_val);
614  return AVERROR(ENOMEM);
615  }
616  rep->init_section->url = get_content_url(baseurl_nodes, 4,
617  max_url_size,
618  rep_id_val,
619  rep_bandwidth_val,
620  initialization_val);
621  xmlFree(initialization_val);
622  if (!rep->init_section->url) {
623  av_freep(&rep->init_section);
624  return AVERROR(ENOMEM);
625  }
626  }
627  } else if (!av_strcasecmp(fragmenturl_node->name, "SegmentURL")) {
628  media_val = xmlGetProp(fragmenturl_node, "media");
629  range_val = xmlGetProp(fragmenturl_node, "mediaRange");
630  if (media_val || range_val) {
631  struct fragment *seg = get_Fragment(range_val);
632  xmlFree(range_val);
633  if (!seg) {
634  xmlFree(media_val);
635  return AVERROR(ENOMEM);
636  }
637  seg->url = get_content_url(baseurl_nodes, 4,
638  max_url_size,
639  rep_id_val,
640  rep_bandwidth_val,
641  media_val);
642  xmlFree(media_val);
643  if (!seg->url) {
644  av_free(seg);
645  return AVERROR(ENOMEM);
646  }
647  err = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
648  if (err < 0) {
649  free_fragment(&seg);
650  return err;
651  }
652  }
653  }
654 
655  return 0;
656 }
657 
658 static int parse_manifest_segmenttimeline(AVFormatContext *s, struct representation *rep,
659  xmlNodePtr fragment_timeline_node)
660 {
661  xmlAttrPtr attr = NULL;
662  char *val = NULL;
663  int err;
664 
665  if (!av_strcasecmp(fragment_timeline_node->name, "S")) {
666  struct timeline *tml = av_mallocz(sizeof(struct timeline));
667  if (!tml) {
668  return AVERROR(ENOMEM);
669  }
670  attr = fragment_timeline_node->properties;
671  while (attr) {
672  val = xmlGetProp(fragment_timeline_node, attr->name);
673 
674  if (!val) {
675  av_log(s, AV_LOG_WARNING, "parse_manifest_segmenttimeline attr->name = %s val is NULL\n", attr->name);
676  attr = attr->next; continue; /* advance attr so a missing value cannot loop forever */
677  }
678 
679  if (!av_strcasecmp(attr->name, "t")) {
680  tml->starttime = (int64_t)strtoll(val, NULL, 10);
681  } else if (!av_strcasecmp(attr->name, "r")) {
682  tml->repeat =(int64_t) strtoll(val, NULL, 10);
683  } else if (!av_strcasecmp(attr->name, "d")) {
684  tml->duration = (int64_t)strtoll(val, NULL, 10);
685  }
686  attr = attr->next;
687  xmlFree(val);
688  }
689  err = av_dynarray_add_nofree(&rep->timelines, &rep->n_timelines, tml);
690  if (err < 0) {
691  av_free(tml);
692  return err;
693  }
694  }
695 
696  return 0;
697 }
698 
699 static int resolve_content_path(AVFormatContext *s, const char *url, int *max_url_size, xmlNodePtr *baseurl_nodes, int n_baseurl_nodes)
700 {
701  char *tmp_str = NULL;
702  char *path = NULL;
703  char *mpdName = NULL;
704  xmlNodePtr node = NULL;
705  char *baseurl = NULL;
706  char *root_url = NULL;
707  char *text = NULL;
708  char *tmp = NULL;
709  int isRootHttp = 0;
710  char token ='/';
711  int start = 0;
712  int rootId = 0;
713  int updated = 0;
714  int size = 0;
715  int i;
716  int tmp_max_url_size = strlen(url);
717 
718  for (i = n_baseurl_nodes-1; i >= 0 ; i--) {
719  text = xmlNodeGetContent(baseurl_nodes[i]);
720  if (!text)
721  continue;
722  tmp_max_url_size += strlen(text);
723  if (ishttp(text)) {
724  xmlFree(text);
725  break;
726  }
727  xmlFree(text);
728  }
729 
730  tmp_max_url_size = aligned(tmp_max_url_size);
731  text = av_mallocz(tmp_max_url_size);
732  if (!text) {
733  updated = AVERROR(ENOMEM);
734  goto end;
735  }
736  av_strlcpy(text, url, strlen(url)+1);
737  tmp = text;
738  while (mpdName = av_strtok(tmp, "/", &tmp)) {
739  size = strlen(mpdName);
740  }
741  av_free(text);
742 
743  path = av_mallocz(tmp_max_url_size);
744  tmp_str = av_mallocz(tmp_max_url_size);
745  if (!tmp_str || !path) {
746  updated = AVERROR(ENOMEM);
747  goto end;
748  }
749 
750  av_strlcpy (path, url, strlen(url) - size + 1);
751  for (rootId = n_baseurl_nodes - 1; rootId > 0; rootId --) {
752  if (!(node = baseurl_nodes[rootId])) {
753  continue;
754  }
755  text = xmlNodeGetContent(node);
756  if (ishttp(text)) {
757  xmlFree(text);
758  break;
759  }
760  xmlFree(text);
761  }
762 
763  node = baseurl_nodes[rootId];
764  baseurl = xmlNodeGetContent(node);
765  root_url = (av_strcasecmp(baseurl, "")) ? baseurl : path;
766  if (node) {
767  xmlNodeSetContent(node, root_url);
768  updated = 1;
769  }
770 
771  size = strlen(root_url);
772  isRootHttp = ishttp(root_url);
773 
774  if (size > 0 && root_url[size - 1] != token) {
775  av_strlcat(root_url, "/", size + 2);
776  size += 2;
777  }
778 
779  for (i = 0; i < n_baseurl_nodes; ++i) {
780  if (i == rootId) {
781  continue;
782  }
783  text = xmlNodeGetContent(baseurl_nodes[i]);
784  if (text && !av_strstart(text, "/", NULL)) {
785  memset(tmp_str, 0, strlen(tmp_str));
786  if (!ishttp(text) && isRootHttp) {
787  av_strlcpy(tmp_str, root_url, size + 1);
788  }
789  start = (text[0] == token);
790  if (start && av_stristr(tmp_str, text)) {
791  char *p = tmp_str;
792  if (!av_strncasecmp(tmp_str, "http://", 7)) {
793  p += 7;
794  } else if (!av_strncasecmp(tmp_str, "https://", 8)) {
795  p += 8;
796  }
797  p = strchr(p, '/');
798  memset(p + 1, 0, strlen(p));
799  }
800  av_strlcat(tmp_str, text + start, tmp_max_url_size);
801  xmlNodeSetContent(baseurl_nodes[i], tmp_str);
802  updated = 1;
803  xmlFree(text);
804  }
805  }
806 
807 end:
808  if (tmp_max_url_size > *max_url_size) {
809  *max_url_size = tmp_max_url_size;
810  }
811  av_free(path);
812  av_free(tmp_str);
813  xmlFree(baseurl);
814  return updated;
815 
816 }
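/* Example of what this rewriting achieves (hypothetical URLs): if the manifest was
 * fetched from "http://example.com/live/manifest.mpd" and a Period- or
 * Representation-level <BaseURL> contains the relative value "media/", the node is
 * rewritten in place to "http://example.com/live/media/", so that get_content_url()
 * later works against absolute base URLs. */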
817 
818 static int parse_manifest_representation(AVFormatContext *s, const char *url,
819  xmlNodePtr node,
820  xmlNodePtr adaptionset_node,
821  xmlNodePtr mpd_baseurl_node,
822  xmlNodePtr period_baseurl_node,
823  xmlNodePtr period_segmenttemplate_node,
824  xmlNodePtr period_segmentlist_node,
825  xmlNodePtr fragment_template_node,
826  xmlNodePtr content_component_node,
827  xmlNodePtr adaptionset_baseurl_node,
828  xmlNodePtr adaptionset_segmentlist_node,
829  xmlNodePtr adaptionset_supplementalproperty_node)
830 {
831  int32_t ret = 0;
832  DASHContext *c = s->priv_data;
833  struct representation *rep = NULL;
834  struct fragment *seg = NULL;
835  xmlNodePtr representation_segmenttemplate_node = NULL;
836  xmlNodePtr representation_baseurl_node = NULL;
837  xmlNodePtr representation_segmentlist_node = NULL;
838  xmlNodePtr segmentlists_tab[3];
839  xmlNodePtr fragment_timeline_node = NULL;
840  xmlNodePtr fragment_templates_tab[5];
841  char *val = NULL;
842  xmlNodePtr baseurl_nodes[4];
843  xmlNodePtr representation_node = node;
844  char *rep_id_val, *rep_bandwidth_val;
845  enum AVMediaType type = AVMEDIA_TYPE_UNKNOWN;
846 
847  // try to get information from the Representation
848  if (type == AVMEDIA_TYPE_UNKNOWN)
849  type = get_content_type(representation_node);
850  // try to get information from the ContentComponent
851  if (type == AVMEDIA_TYPE_UNKNOWN)
852  type = get_content_type(content_component_node);
853  // try to get information from the AdaptationSet
854  if (type == AVMEDIA_TYPE_UNKNOWN)
855  type = get_content_type(adaptionset_node);
856  if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO &&
857  type != AVMEDIA_TYPE_SUBTITLE) {
858  av_log(s, AV_LOG_VERBOSE, "Parsing '%s' - skipping unsupported representation type\n", url);
859  return 0;
860  }
861 
862  // convert selected representation to our internal struct
863  rep = av_mallocz(sizeof(struct representation));
864  if (!rep)
865  return AVERROR(ENOMEM);
866  if (c->adaptionset_lang) {
867  rep->lang = av_strdup(c->adaptionset_lang);
868  if (!rep->lang) {
869  av_log(s, AV_LOG_ERROR, "alloc language memory failure\n");
870  av_freep(&rep);
871  return AVERROR(ENOMEM);
872  }
873  }
874  rep->parent = s;
875  representation_segmenttemplate_node = find_child_node_by_name(representation_node, "SegmentTemplate");
876  representation_baseurl_node = find_child_node_by_name(representation_node, "BaseURL");
877  representation_segmentlist_node = find_child_node_by_name(representation_node, "SegmentList");
878  rep_id_val = xmlGetProp(representation_node, "id");
879  rep_bandwidth_val = xmlGetProp(representation_node, "bandwidth");
880 
881  baseurl_nodes[0] = mpd_baseurl_node;
882  baseurl_nodes[1] = period_baseurl_node;
883  baseurl_nodes[2] = adaptionset_baseurl_node;
884  baseurl_nodes[3] = representation_baseurl_node;
885 
886  ret = resolve_content_path(s, url, &c->max_url_size, baseurl_nodes, 4);
887  c->max_url_size = aligned(c->max_url_size
888  + (rep_id_val ? strlen(rep_id_val) : 0)
889  + (rep_bandwidth_val ? strlen(rep_bandwidth_val) : 0));
890  if (ret == AVERROR(ENOMEM) || ret == 0)
891  goto free;
892  if (representation_segmenttemplate_node || fragment_template_node || period_segmenttemplate_node) {
893  fragment_timeline_node = NULL;
894  fragment_templates_tab[0] = representation_segmenttemplate_node;
895  fragment_templates_tab[1] = adaptionset_segmentlist_node;
896  fragment_templates_tab[2] = fragment_template_node;
897  fragment_templates_tab[3] = period_segmenttemplate_node;
898  fragment_templates_tab[4] = period_segmentlist_node;
899 
900  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "initialization");
901  if (val) {
902  rep->init_section = av_mallocz(sizeof(struct fragment));
903  if (!rep->init_section) {
904  xmlFree(val);
905  goto enomem;
906  }
907  c->max_url_size = aligned(c->max_url_size + strlen(val));
908  rep->init_section->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, val);
909  xmlFree(val);
910  if (!rep->init_section->url)
911  goto enomem;
912  rep->init_section->size = -1;
913  }
914  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "media");
915  if (val) {
916  c->max_url_size = aligned(c->max_url_size + strlen(val));
917  rep->url_template = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, val);
918  xmlFree(val);
919  }
920  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "presentationTimeOffset");
921  if (val) {
922  rep->presentation_timeoffset = (int64_t) strtoll(val, NULL, 10);
923  av_log(s, AV_LOG_TRACE, "rep->presentation_timeoffset = [%"PRId64"]\n", rep->presentation_timeoffset);
924  xmlFree(val);
925  }
926  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "duration");
927  if (val) {
928  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
929  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
930  xmlFree(val);
931  }
932  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "timescale");
933  if (val) {
934  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
935  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
936  xmlFree(val);
937  }
938  val = get_val_from_nodes_tab(fragment_templates_tab, 4, "startNumber");
939  if (val) {
940  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
941  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
942  xmlFree(val);
943  }
944  if (adaptionset_supplementalproperty_node) {
945  if (!av_strcasecmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), "http://dashif.org/guidelines/last-segment-number")) {
946  val = xmlGetProp(adaptionset_supplementalproperty_node,"value");
947  if (!val) {
948  av_log(s, AV_LOG_ERROR, "Missing value attribute in adaptionset_supplementalproperty_node\n");
949  } else {
950  rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
951  xmlFree(val);
952  }
953  }
954  }
955 
956  fragment_timeline_node = find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
957 
958  if (!fragment_timeline_node)
959  fragment_timeline_node = find_child_node_by_name(fragment_template_node, "SegmentTimeline");
960  if (!fragment_timeline_node)
961  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
962  if (!fragment_timeline_node)
963  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
964  if (fragment_timeline_node) {
965  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
966  while (fragment_timeline_node) {
967  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
968  if (ret < 0)
969  goto free;
970  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
971  }
972  }
973  } else if (representation_baseurl_node && !representation_segmentlist_node) {
974  seg = av_mallocz(sizeof(struct fragment));
975  if (!seg)
976  goto enomem;
977  ret = av_dynarray_add_nofree(&rep->fragments, &rep->n_fragments, seg);
978  if (ret < 0) {
979  av_free(seg);
980  goto free;
981  }
982  seg->url = get_content_url(baseurl_nodes, 4, c->max_url_size, rep_id_val, rep_bandwidth_val, NULL);
983  if (!seg->url)
984  goto enomem;
985  seg->size = -1;
986  } else if (representation_segmentlist_node) {
987  // TODO: https://www.brendanlong.com/the-structure-of-an-mpeg-dash-mpd.html
988  // http://www-itec.uni-klu.ac.at/dash/ddash/mpdGenerator.php?fragmentlength=15&type=full
989  xmlNodePtr fragmenturl_node = NULL;
990  segmentlists_tab[0] = representation_segmentlist_node;
991  segmentlists_tab[1] = adaptionset_segmentlist_node;
992  segmentlists_tab[2] = period_segmentlist_node;
993 
994  val = get_val_from_nodes_tab(segmentlists_tab, 3, "duration");
995  if (val) {
996  rep->fragment_duration = (int64_t) strtoll(val, NULL, 10);
997  av_log(s, AV_LOG_TRACE, "rep->fragment_duration = [%"PRId64"]\n", rep->fragment_duration);
998  xmlFree(val);
999  }
1000  val = get_val_from_nodes_tab(segmentlists_tab, 3, "timescale");
1001  if (val) {
1002  rep->fragment_timescale = (int64_t) strtoll(val, NULL, 10);
1003  av_log(s, AV_LOG_TRACE, "rep->fragment_timescale = [%"PRId64"]\n", rep->fragment_timescale);
1004  xmlFree(val);
1005  }
1006  val = get_val_from_nodes_tab(segmentlists_tab, 3, "startNumber");
1007  if (val) {
1008  rep->start_number = rep->first_seq_no = (int64_t) strtoll(val, NULL, 10);
1009  av_log(s, AV_LOG_TRACE, "rep->first_seq_no = [%"PRId64"]\n", rep->first_seq_no);
1010  xmlFree(val);
1011  }
1012 
1013  fragmenturl_node = xmlFirstElementChild(representation_segmentlist_node);
1014  while (fragmenturl_node) {
1015  ret = parse_manifest_segmenturlnode(s, rep, fragmenturl_node,
1016  baseurl_nodes,
1017  rep_id_val,
1018  rep_bandwidth_val);
1019  if (ret < 0)
1020  goto free;
1021  fragmenturl_node = xmlNextElementSibling(fragmenturl_node);
1022  }
1023 
1024  fragment_timeline_node = find_child_node_by_name(adaptionset_segmentlist_node, "SegmentTimeline");
1025  if (!fragment_timeline_node)
1026  fragment_timeline_node = find_child_node_by_name(period_segmentlist_node, "SegmentTimeline");
1027  if (fragment_timeline_node) {
1028  fragment_timeline_node = xmlFirstElementChild(fragment_timeline_node);
1029  while (fragment_timeline_node) {
1030  ret = parse_manifest_segmenttimeline(s, rep, fragment_timeline_node);
1031  if (ret < 0)
1032  goto free;
1033  fragment_timeline_node = xmlNextElementSibling(fragment_timeline_node);
1034  }
1035  }
1036  } else {
1037  av_log(s, AV_LOG_ERROR, "Unknown format of Representation node id[%s] \n", rep_id_val);
1038  goto free;
1039  }
1040 
1041  if (rep->fragment_duration > 0 && !rep->fragment_timescale)
1042  rep->fragment_timescale = 1;
1043  rep->bandwidth = rep_bandwidth_val ? atoi(rep_bandwidth_val) : 0;
1044  av_strlcpy(rep->id, rep_id_val ? rep_id_val : "", sizeof(rep->id)); /* av_strlcpy guarantees NUL termination of rep->id */
1045  rep->framerate = av_make_q(0, 0);
1046  if (type == AVMEDIA_TYPE_VIDEO) {
1047  char *rep_framerate_val = xmlGetProp(representation_node, "frameRate");
1048  if (rep_framerate_val) {
1049  ret = av_parse_video_rate(&rep->framerate, rep_framerate_val);
1050  if (ret < 0)
1051  av_log(s, AV_LOG_VERBOSE, "Ignoring invalid frame rate '%s'\n", rep_framerate_val);
1052  xmlFree(rep_framerate_val);
1053  }
1054  }
1055 
1056  switch (type) {
1057  case AVMEDIA_TYPE_VIDEO:
1058  ret = av_dynarray_add_nofree(&c->videos, &c->n_videos, rep);
1059  break;
1060  case AVMEDIA_TYPE_AUDIO:
1061  ret = av_dynarray_add_nofree(&c->audios, &c->n_audios, rep);
1062  break;
1063  case AVMEDIA_TYPE_SUBTITLE:
1064  ret = av_dynarray_add_nofree(&c->subtitles, &c->n_subtitles, rep);
1065  break;
1066  }
1067  if (ret < 0)
1068  goto free;
1069 
1070 end:
1071  if (rep_id_val)
1072  xmlFree(rep_id_val);
1073  if (rep_bandwidth_val)
1074  xmlFree(rep_bandwidth_val);
1075 
1076  return ret;
1077 enomem:
1078  ret = AVERROR(ENOMEM);
1079 free:
1080  free_representation(rep);
1081  goto end;
1082 }
1083 
1084 static int parse_manifest_adaptationset_attr(AVFormatContext *s, xmlNodePtr adaptionset_node)
1085 {
1086  DASHContext *c = s->priv_data;
1087 
1088  if (!adaptionset_node) {
1089  av_log(s, AV_LOG_WARNING, "Cannot get AdaptionSet\n");
1090  return AVERROR(EINVAL);
1091  }
1092  c->adaptionset_lang = xmlGetProp(adaptionset_node, "lang");
1093 
1094  return 0;
1095 }
1096 
1097 static int parse_manifest_adaptationset(AVFormatContext *s, const char *url,
1098  xmlNodePtr adaptionset_node,
1099  xmlNodePtr mpd_baseurl_node,
1100  xmlNodePtr period_baseurl_node,
1101  xmlNodePtr period_segmenttemplate_node,
1102  xmlNodePtr period_segmentlist_node)
1103 {
1104  int ret = 0;
1105  DASHContext *c = s->priv_data;
1106  xmlNodePtr fragment_template_node = NULL;
1107  xmlNodePtr content_component_node = NULL;
1108  xmlNodePtr adaptionset_baseurl_node = NULL;
1109  xmlNodePtr adaptionset_segmentlist_node = NULL;
1110  xmlNodePtr adaptionset_supplementalproperty_node = NULL;
1111  xmlNodePtr node = NULL;
1112 
1113  ret = parse_manifest_adaptationset_attr(s, adaptionset_node);
1114  if (ret < 0)
1115  return ret;
1116 
1117  node = xmlFirstElementChild(adaptionset_node);
1118  while (node) {
1119  if (!av_strcasecmp(node->name, "SegmentTemplate")) {
1120  fragment_template_node = node;
1121  } else if (!av_strcasecmp(node->name, "ContentComponent")) {
1122  content_component_node = node;
1123  } else if (!av_strcasecmp(node->name, "BaseURL")) {
1124  adaptionset_baseurl_node = node;
1125  } else if (!av_strcasecmp(node->name, "SegmentList")) {
1126  adaptionset_segmentlist_node = node;
1127  } else if (!av_strcasecmp(node->name, "SupplementalProperty")) {
1128  adaptionset_supplementalproperty_node = node;
1129  } else if (!av_strcasecmp(node->name, "Representation")) {
1130  ret = parse_manifest_representation(s, url, node,
1131  adaptionset_node,
1132  mpd_baseurl_node,
1133  period_baseurl_node,
1134  period_segmenttemplate_node,
1135  period_segmentlist_node,
1136  fragment_template_node,
1137  content_component_node,
1138  adaptionset_baseurl_node,
1139  adaptionset_segmentlist_node,
1140  adaptionset_supplementalproperty_node);
1141  if (ret < 0)
1142  goto err;
1143  }
1144  node = xmlNextElementSibling(node);
1145  }
1146 
1147 err:
1148  xmlFree(c->adaptionset_lang);
1149  c->adaptionset_lang = NULL;
1150  return ret;
1151 }
1152 
1153 static int parse_programinformation(AVFormatContext *s, xmlNodePtr node)
1154 {
1155  xmlChar *val = NULL;
1156 
1157  node = xmlFirstElementChild(node);
1158  while (node) {
1159  if (!av_strcasecmp(node->name, "Title")) {
1160  val = xmlNodeGetContent(node);
1161  if (val) {
1162  av_dict_set(&s->metadata, "Title", val, 0);
1163  }
1164  } else if (!av_strcasecmp(node->name, "Source")) {
1165  val = xmlNodeGetContent(node);
1166  if (val) {
1167  av_dict_set(&s->metadata, "Source", val, 0);
1168  }
1169  } else if (!av_strcasecmp(node->name, "Copyright")) {
1170  val = xmlNodeGetContent(node);
1171  if (val) {
1172  av_dict_set(&s->metadata, "Copyright", val, 0);
1173  }
1174  }
1175  node = xmlNextElementSibling(node);
1176  xmlFree(val);
1177  val = NULL;
1178  }
1179  return 0;
1180 }
1181 
1182 static int parse_manifest(AVFormatContext *s, const char *url, AVIOContext *in)
1183 {
1184  DASHContext *c = s->priv_data;
1185  int ret = 0;
1186  int close_in = 0;
1187  int64_t filesize = 0;
1188  AVBPrint buf;
1189  AVDictionary *opts = NULL;
1190  xmlDoc *doc = NULL;
1191  xmlNodePtr root_element = NULL;
1192  xmlNodePtr node = NULL;
1193  xmlNodePtr period_node = NULL;
1194  xmlNodePtr tmp_node = NULL;
1195  xmlNodePtr mpd_baseurl_node = NULL;
1196  xmlNodePtr period_baseurl_node = NULL;
1197  xmlNodePtr period_segmenttemplate_node = NULL;
1198  xmlNodePtr period_segmentlist_node = NULL;
1199  xmlNodePtr adaptionset_node = NULL;
1200  xmlAttrPtr attr = NULL;
1201  char *val = NULL;
1202  uint32_t period_duration_sec = 0;
1203  uint32_t period_start_sec = 0;
1204 
1205  if (!in) {
1206  close_in = 1;
1207 
1208  av_dict_copy(&opts, c->avio_opts, 0);
1209  ret = avio_open2(&in, url, AVIO_FLAG_READ, c->interrupt_callback, &opts);
1210  av_dict_free(&opts);
1211  if (ret < 0)
1212  return ret;
1213  }
1214 
1215  if (av_opt_get(in, "location", AV_OPT_SEARCH_CHILDREN, (uint8_t**)&c->base_url) < 0)
1216  c->base_url = av_strdup(url);
1217 
1218  filesize = avio_size(in);
1219  filesize = filesize > 0 ? filesize : DEFAULT_MANIFEST_SIZE;
1220 
1221  if (filesize > MAX_BPRINT_READ_SIZE) {
1222  av_log(s, AV_LOG_ERROR, "Manifest too large: %"PRId64"\n", filesize);
1223  return AVERROR_INVALIDDATA;
1224  }
1225 
1226  av_bprint_init(&buf, filesize + 1, AV_BPRINT_SIZE_UNLIMITED);
1227 
1228  if ((ret = avio_read_to_bprint(in, &buf, MAX_BPRINT_READ_SIZE)) < 0 ||
1229  !avio_feof(in) ||
1230  (filesize = buf.len) == 0) {
1231  av_log(s, AV_LOG_ERROR, "Unable to read manifest '%s'\n", url);
1232  if (ret == 0)
1233  ret = AVERROR_INVALIDDATA;
1234  } else {
1235  LIBXML_TEST_VERSION
1236 
1237  doc = xmlReadMemory(buf.str, filesize, c->base_url, NULL, 0);
1238  root_element = xmlDocGetRootElement(doc);
1239  node = root_element;
1240 
1241  if (!node) {
1242  ret = AVERROR_INVALIDDATA;
1243  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing root node\n", url);
1244  goto cleanup;
1245  }
1246 
1247  if (node->type != XML_ELEMENT_NODE ||
1248  av_strcasecmp(node->name, "MPD")) {
1249  ret = AVERROR_INVALIDDATA;
1250  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - wrong root node name[%s] type[%d]\n", url, node->name, (int)node->type);
1251  goto cleanup;
1252  }
1253 
1254  val = xmlGetProp(node, "type");
1255  if (!val) {
1256  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing type attrib\n", url);
1257  ret = AVERROR_INVALIDDATA;
1258  goto cleanup;
1259  }
1260  if (!av_strcasecmp(val, "dynamic"))
1261  c->is_live = 1;
1262  xmlFree(val);
1263 
1264  attr = node->properties;
1265  while (attr) {
1266  val = xmlGetProp(node, attr->name);
1267 
1268  if (!av_strcasecmp(attr->name, "availabilityStartTime")) {
1269  c->availability_start_time = get_utc_date_time_insec(s, val);
1270  av_log(s, AV_LOG_TRACE, "c->availability_start_time = [%"PRId64"]\n", c->availability_start_time);
1271  } else if (!av_strcasecmp(attr->name, "availabilityEndTime")) {
1272  c->availability_end_time = get_utc_date_time_insec(s, val);
1273  av_log(s, AV_LOG_TRACE, "c->availability_end_time = [%"PRId64"]\n", c->availability_end_time);
1274  } else if (!av_strcasecmp(attr->name, "publishTime")) {
1275  c->publish_time = get_utc_date_time_insec(s, val);
1276  av_log(s, AV_LOG_TRACE, "c->publish_time = [%"PRId64"]\n", c->publish_time);
1277  } else if (!av_strcasecmp(attr->name, "minimumUpdatePeriod")) {
1278  c->minimum_update_period = get_duration_insec(s, val);
1279  av_log(s, AV_LOG_TRACE, "c->minimum_update_period = [%"PRId64"]\n", c->minimum_update_period);
1280  } else if (!av_strcasecmp(attr->name, "timeShiftBufferDepth")) {
1281  c->time_shift_buffer_depth = get_duration_insec(s, val);
1282  av_log(s, AV_LOG_TRACE, "c->time_shift_buffer_depth = [%"PRId64"]\n", c->time_shift_buffer_depth);
1283  } else if (!av_strcasecmp(attr->name, "minBufferTime")) {
1284  c->min_buffer_time = get_duration_insec(s, val);
1285  av_log(s, AV_LOG_TRACE, "c->min_buffer_time = [%"PRId64"]\n", c->min_buffer_time);
1286  } else if (!av_strcasecmp(attr->name, "suggestedPresentationDelay")) {
1287  c->suggested_presentation_delay = get_duration_insec(s, val);
1288  av_log(s, AV_LOG_TRACE, "c->suggested_presentation_delay = [%"PRId64"]\n", c->suggested_presentation_delay);
1289  } else if (!av_strcasecmp(attr->name, "mediaPresentationDuration")) {
1290  c->media_presentation_duration = get_duration_insec(s, val);
1291  av_log(s, AV_LOG_TRACE, "c->media_presentation_duration = [%"PRId64"]\n", c->media_presentation_duration);
1292  }
1293  attr = attr->next;
1294  xmlFree(val);
1295  }
1296 
1297  tmp_node = find_child_node_by_name(node, "BaseURL");
1298  if (tmp_node) {
1299  mpd_baseurl_node = xmlCopyNode(tmp_node,1);
1300  } else {
1301  mpd_baseurl_node = xmlNewNode(NULL, "BaseURL");
1302  }
1303 
1304  // for now we can handle only one Period: the one with the longest duration
1305  node = xmlFirstElementChild(node);
1306  while (node) {
1307  if (!av_strcasecmp(node->name, "Period")) {
1308  period_duration_sec = 0;
1309  period_start_sec = 0;
1310  attr = node->properties;
1311  while (attr) {
1312  val = xmlGetProp(node, attr->name);
1313  if (!av_strcasecmp(attr->name, "duration")) {
1314  period_duration_sec = get_duration_insec(s, val);
1315  } else if (!av_strcasecmp(attr->name, "start")) {
1316  period_start_sec = get_duration_insec(s, val);
1317  }
1318  attr = attr->next;
1319  xmlFree(val);
1320  }
1321  if ((period_duration_sec) >= (c->period_duration)) {
1322  period_node = node;
1323  c->period_duration = period_duration_sec;
1324  c->period_start = period_start_sec;
1325  if (c->period_start > 0)
1327  }
1328  } else if (!av_strcasecmp(node->name, "ProgramInformation")) {
1329  parse_programinformation(s, node);
1330  }
1331  node = xmlNextElementSibling(node);
1332  }
1333  if (!period_node) {
1334  av_log(s, AV_LOG_ERROR, "Unable to parse '%s' - missing Period node\n", url);
1335  ret = AVERROR_INVALIDDATA;
1336  goto cleanup;
1337  }
1338 
1339  adaptionset_node = xmlFirstElementChild(period_node);
1340  while (adaptionset_node) {
1341  if (!av_strcasecmp(adaptionset_node->name, "BaseURL")) {
1342  period_baseurl_node = adaptionset_node;
1343  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentTemplate")) {
1344  period_segmenttemplate_node = adaptionset_node;
1345  } else if (!av_strcasecmp(adaptionset_node->name, "SegmentList")) {
1346  period_segmentlist_node = adaptionset_node;
1347  } else if (!av_strcasecmp(adaptionset_node->name, "AdaptationSet")) {
1348  parse_manifest_adaptationset(s, url, adaptionset_node, mpd_baseurl_node, period_baseurl_node, period_segmenttemplate_node, period_segmentlist_node);
1349  }
1350  adaptionset_node = xmlNextElementSibling(adaptionset_node);
1351  }
1352 cleanup:
1353  /*free the document */
1354  xmlFreeDoc(doc);
1355  xmlCleanupParser();
1356  xmlFreeNode(mpd_baseurl_node);
1357  }
1358 
1359  av_bprint_finalize(&buf, NULL);
1360  if (close_in) {
1361  avio_close(in);
1362  }
1363  return ret;
1364 }
1365 
1366 static int64_t calc_cur_seg_no(AVFormatContext *s, struct representation *pls)
1367 {
1368  DASHContext *c = s->priv_data;
1369  int64_t num = 0;
1370  int64_t start_time_offset = 0;
1371 
1372  if (c->is_live) {
1373  if (pls->n_fragments) {
1374  av_log(s, AV_LOG_TRACE, "in n_fragments mode\n");
1375  num = pls->first_seq_no;
1376  } else if (pls->n_timelines) {
1377  av_log(s, AV_LOG_TRACE, "in n_timelines mode\n");
1378  start_time_offset = get_segment_start_time_based_on_timeline(pls, 0xFFFFFFFF) - 60 * pls->fragment_timescale; // 60 seconds before end
1379  num = calc_next_seg_no_from_timelines(pls, start_time_offset);
1380  if (num == -1)
1381  num = pls->first_seq_no;
1382  else
1383  num += pls->first_seq_no;
1384  } else if (pls->fragment_duration){
1385  av_log(s, AV_LOG_TRACE, "in fragment_duration mode fragment_timescale = %"PRId64", presentation_timeoffset = %"PRId64"\n", pls->fragment_timescale, pls->presentation_timeoffset);
1386  if (pls->presentation_timeoffset) {
1388  } else if (c->publish_time > 0 && !c->availability_start_time) {
1389  if (c->min_buffer_time) {
1391  } else {
1393  }
1394  } else {
1396  }
1397  }
1398  } else {
1399  num = pls->first_seq_no;
1400  }
1401  return num;
1402 }
1403 
1404 static int64_t calc_min_seg_no(AVFormatContext *s, struct representation *pls)
1405 {
1406  DASHContext *c = s->priv_data;
1407  int64_t num = 0;
1408 
1409  if (c->is_live && pls->fragment_duration) {
1410  av_log(s, AV_LOG_TRACE, "in live mode\n");
1412  } else {
1413  num = pls->first_seq_no;
1414  }
1415  return num;
1416 }
1417 
1418 static int64_t calc_max_seg_no(struct representation *pls, DASHContext *c)
1419 {
1420  int64_t num = 0;
1421 
1422  if (pls->n_fragments) {
1423  num = pls->first_seq_no + pls->n_fragments - 1;
1424  } else if (pls->n_timelines) {
1425  int i = 0;
1426  num = pls->first_seq_no + pls->n_timelines - 1;
1427  for (i = 0; i < pls->n_timelines; i++) {
1428  if (pls->timelines[i]->repeat == -1) {
1429  int length_of_each_segment = pls->timelines[i]->duration / pls->fragment_timescale;
1430  num = c->period_duration / length_of_each_segment;
1431  } else {
1432  num += pls->timelines[i]->repeat;
1433  }
1434  }
1435  } else if (c->is_live && pls->fragment_duration) {
1437  } else if (pls->fragment_duration) {
1439  }
1440 
1441  return num;
1442 }
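/* Example: a VOD representation listing 100 <SegmentURL> fragments with
 * first_seq_no = 1 yields a maximum sequence number of 1 + 100 - 1 = 100; for
 * timeline-based representations each repeat count is added on top, and an
 * open-ended repeat (r = -1) is bounded by period_duration / segment duration. */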
1443 
1444 static void move_timelines(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1445 {
1446  if (rep_dest && rep_src ) {
1447  free_timelines_list(rep_dest);
1448  rep_dest->timelines = rep_src->timelines;
1449  rep_dest->n_timelines = rep_src->n_timelines;
1450  rep_dest->first_seq_no = rep_src->first_seq_no;
1451  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1452  rep_src->timelines = NULL;
1453  rep_src->n_timelines = 0;
1454  rep_dest->cur_seq_no = rep_src->cur_seq_no;
1455  }
1456 }
1457 
1458 static void move_segments(struct representation *rep_src, struct representation *rep_dest, DASHContext *c)
1459 {
1460  if (rep_dest && rep_src ) {
1461  free_fragment_list(rep_dest);
1462  if (rep_src->start_number > (rep_dest->start_number + rep_dest->n_fragments))
1463  rep_dest->cur_seq_no = 0;
1464  else
1465  rep_dest->cur_seq_no += rep_src->start_number - rep_dest->start_number;
1466  rep_dest->fragments = rep_src->fragments;
1467  rep_dest->n_fragments = rep_src->n_fragments;
1468  rep_dest->parent = rep_src->parent;
1469  rep_dest->last_seq_no = calc_max_seg_no(rep_dest, c);
1470  rep_src->fragments = NULL;
1471  rep_src->n_fragments = 0;
1472  }
1473 }
1474 
1475 
1476 static int refresh_manifest(AVFormatContext *s)
1477 {
1478  int ret = 0, i;
1479  DASHContext *c = s->priv_data;
1480  // save current context
1481  int n_videos = c->n_videos;
1482  struct representation **videos = c->videos;
1483  int n_audios = c->n_audios;
1484  struct representation **audios = c->audios;
1485  int n_subtitles = c->n_subtitles;
1486  struct representation **subtitles = c->subtitles;
1487  char *base_url = c->base_url;
1488 
1489  c->base_url = NULL;
1490  c->n_videos = 0;
1491  c->videos = NULL;
1492  c->n_audios = 0;
1493  c->audios = NULL;
1494  c->n_subtitles = 0;
1495  c->subtitles = NULL;
1496  ret = parse_manifest(s, s->url, NULL);
1497  if (ret)
1498  goto finish;
1499 
1500  if (c->n_videos != n_videos) {
1501  av_log(c, AV_LOG_ERROR,
1502  "new manifest has mismatched no. of video representations, %d -> %d\n",
1503  n_videos, c->n_videos);
1504  return AVERROR_INVALIDDATA;
1505  }
1506  if (c->n_audios != n_audios) {
1507  av_log(c, AV_LOG_ERROR,
1508  "new manifest has mismatched no. of audio representations, %d -> %d\n",
1509  n_audios, c->n_audios);
1510  return AVERROR_INVALIDDATA;
1511  }
1512  if (c->n_subtitles != n_subtitles) {
1513  av_log(c, AV_LOG_ERROR,
1514  "new manifest has mismatched no. of subtitles representations, %d -> %d\n",
1515  n_subtitles, c->n_subtitles);
1516  return AVERROR_INVALIDDATA;
1517  }
1518 
1519  for (i = 0; i < n_videos; i++) {
1520  struct representation *cur_video = videos[i];
1521  struct representation *ccur_video = c->videos[i];
1522  if (cur_video->timelines) {
1523  // calc current time
1524  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_video, cur_video->cur_seq_no) / cur_video->fragment_timescale;
1525  // update segments
1526  ccur_video->cur_seq_no = calc_next_seg_no_from_timelines(ccur_video, currentTime * cur_video->fragment_timescale - 1);
1527  if (ccur_video->cur_seq_no >= 0) {
1528  move_timelines(ccur_video, cur_video, c);
1529  }
1530  }
1531  if (cur_video->fragments) {
1532  move_segments(ccur_video, cur_video, c);
1533  }
1534  }
1535  for (i = 0; i < n_audios; i++) {
1536  struct representation *cur_audio = audios[i];
1537  struct representation *ccur_audio = c->audios[i];
1538  if (cur_audio->timelines) {
1539  // calc current time
1540  int64_t currentTime = get_segment_start_time_based_on_timeline(cur_audio, cur_audio->cur_seq_no) / cur_audio->fragment_timescale;
1541  // update segments
1542  ccur_audio->cur_seq_no = calc_next_seg_no_from_timelines(ccur_audio, currentTime * cur_audio->fragment_timescale - 1);
1543  if (ccur_audio->cur_seq_no >= 0) {
1544  move_timelines(ccur_audio, cur_audio, c);
1545  }
1546  }
1547  if (cur_audio->fragments) {
1548  move_segments(ccur_audio, cur_audio, c);
1549  }
1550  }
1551 
1552 finish:
1553  // restore context
1554  if (c->base_url)
1555  av_free(base_url);
1556  else
1557  c->base_url = base_url;
1558 
1559  if (c->subtitles)
1560  free_subtitle_list(c);
1561  if (c->audios)
1562  free_audio_list(c);
1563  if (c->videos)
1564  free_video_list(c);
1565 
1566  c->n_subtitles = n_subtitles;
1567  c->subtitles = subtitles;
1568  c->n_audios = n_audios;
1569  c->audios = audios;
1570  c->n_videos = n_videos;
1571  c->videos = videos;
1572  return ret;
1573 }
1574 
1575 static struct fragment *get_current_fragment(struct representation *pls)
1576 {
1577  int64_t min_seq_no = 0;
1578  int64_t max_seq_no = 0;
1579  struct fragment *seg = NULL;
1580  struct fragment *seg_ptr = NULL;
1581  DASHContext *c = pls->parent->priv_data;
1582 
1583  while (( !ff_check_interrupt(c->interrupt_callback)&& pls->n_fragments > 0)) {
1584  if (pls->cur_seq_no < pls->n_fragments) {
1585  seg_ptr = pls->fragments[pls->cur_seq_no];
1586  seg = av_mallocz(sizeof(struct fragment));
1587  if (!seg) {
1588  return NULL;
1589  }
1590  seg->url = av_strdup(seg_ptr->url);
1591  if (!seg->url) {
1592  av_free(seg);
1593  return NULL;
1594  }
1595  seg->size = seg_ptr->size;
1596  seg->url_offset = seg_ptr->url_offset;
1597  return seg;
1598  } else if (c->is_live) {
1599  refresh_manifest(pls->parent);
1600  } else {
1601  break;
1602  }
1603  }
1604  if (c->is_live) {
1605  min_seq_no = calc_min_seg_no(pls->parent, pls);
1606  max_seq_no = calc_max_seg_no(pls, c);
1607 
1608  if (pls->timelines || pls->fragments) {
1609  refresh_manifest(pls->parent);
1610  }
1611  if (pls->cur_seq_no <= min_seq_no) {
1612  av_log(pls->parent, AV_LOG_VERBOSE, "old fragment: cur[%"PRId64"] min[%"PRId64"] max[%"PRId64"]\n", (int64_t)pls->cur_seq_no, min_seq_no, max_seq_no);
1613  pls->cur_seq_no = calc_cur_seg_no(pls->parent, pls);
1614  } else if (pls->cur_seq_no > max_seq_no) {
1615  av_log(pls->parent, AV_LOG_VERBOSE, "new fragment: min[%"PRId64"] max[%"PRId64"]\n", min_seq_no, max_seq_no);
1616  }
1617  seg = av_mallocz(sizeof(struct fragment));
1618  if (!seg) {
1619  return NULL;
1620  }
1621  } else if (pls->cur_seq_no <= pls->last_seq_no) {
1622  seg = av_mallocz(sizeof(struct fragment));
1623  if (!seg) {
1624  return NULL;
1625  }
1626  }
1627  if (seg) {
1628  char *tmpfilename= av_mallocz(c->max_url_size);
1629  if (!tmpfilename) {
1630  return NULL;
1631  }
1633  seg->url = av_strireplace(pls->url_template, pls->url_template, tmpfilename);
1634  if (!seg->url) {
1635  av_log(pls->parent, AV_LOG_WARNING, "Unable to resolve template url '%s', try to use origin template\n", pls->url_template);
1636  seg->url = av_strdup(pls->url_template);
1637  if (!seg->url) {
1638  av_log(pls->parent, AV_LOG_ERROR, "Cannot resolve template url '%s'\n", pls->url_template);
1639  av_free(tmpfilename);
1640  return NULL;
1641  }
1642  }
1643  av_free(tmpfilename);
1644  seg->size = -1;
1645  }
1646 
1647  return seg;
1648 }
1649 
1650 static int read_from_url(struct representation *pls, struct fragment *seg,
1651  uint8_t *buf, int buf_size)
1652 {
1653  int ret;
1654 
1655  /* limit read if the fragment was only a part of a file */
1656  if (seg->size >= 0)
1657  buf_size = FFMIN(buf_size, pls->cur_seg_size - pls->cur_seg_offset);
1658 
1659  ret = avio_read(pls->input, buf, buf_size);
1660  if (ret > 0)
1661  pls->cur_seg_offset += ret;
1662 
1663  return ret;
1664 }
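/* Example: for a fragment with cur_seg_size = 500 and cur_seg_offset = 400, a request
 * for 188 bytes is clamped to FFMIN(188, 500 - 400) = 100 bytes so the read never
 * crosses the end of the byte range mapped to this fragment. */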
1665 
1666 static int open_input(DASHContext *c, struct representation *pls, struct fragment *seg)
1667 {
1668  AVDictionary *opts = NULL;
1669  char *url = NULL;
1670  int ret = 0;
1671 
1672  url = av_mallocz(c->max_url_size);
1673  if (!url) {
1674  ret = AVERROR(ENOMEM);
1675  goto cleanup;
1676  }
1677 
1678  if (seg->size >= 0) {
1679  /* try to restrict the HTTP request to the part we want
1680  * (if this is in fact a HTTP request) */
1681  av_dict_set_int(&opts, "offset", seg->url_offset, 0);
1682  av_dict_set_int(&opts, "end_offset", seg->url_offset + seg->size, 0);
1683  }
1684 
1685  ff_make_absolute_url(url, c->max_url_size, c->base_url, seg->url);
1686  av_log(pls->parent, AV_LOG_VERBOSE, "DASH request for url '%s', offset %"PRId64"\n",
1687  url, seg->url_offset);
1688  ret = open_url(pls->parent, &pls->input, url, &c->avio_opts, opts, NULL);
1689 
1690 cleanup:
1691  av_free(url);
1692  av_dict_free(&opts);
1693  pls->cur_seg_offset = 0;
1694  pls->cur_seg_size = seg->size;
1695  return ret;
1696 }
1697 
1698 static int update_init_section(struct representation *pls)
1699 {
1700  static const int max_init_section_size = 1024 * 1024;
1701  DASHContext *c = pls->parent->priv_data;
1702  int64_t sec_size;
1703  int64_t urlsize;
1704  int ret;
1705 
1706  if (!pls->init_section || pls->init_sec_buf)
1707  return 0;
1708 
1709  ret = open_input(c, pls, pls->init_section);
1710  if (ret < 0) {
1712  "Failed to open an initialization section\n");
1713  return ret;
1714  }
1715 
1716  if (pls->init_section->size >= 0)
1717  sec_size = pls->init_section->size;
1718  else if ((urlsize = avio_size(pls->input)) >= 0)
1719  sec_size = urlsize;
1720  else
1721  sec_size = max_init_section_size;
1722 
1723  av_log(pls->parent, AV_LOG_DEBUG,
1724  "Downloading an initialization section of size %"PRId64"\n",
1725  sec_size);
1726 
1727  sec_size = FFMIN(sec_size, max_init_section_size);
1728 
1729  av_fast_malloc(&pls->init_sec_buf, &pls->init_sec_buf_size, sec_size);
1730 
1731  ret = read_from_url(pls, pls->init_section, pls->init_sec_buf,
1732  pls->init_sec_buf_size);
1733  ff_format_io_close(pls->parent, &pls->input);
1734 
1735  if (ret < 0)
1736  return ret;
1737 
1738  pls->init_sec_data_len = ret;
1739  pls->init_sec_buf_read_offset = 0;
1740 
1741  return 0;
1742 }
1743 
1744 static int64_t seek_data(void *opaque, int64_t offset, int whence)
1745 {
1746  struct representation *v = opaque;
1747  if (v->n_fragments && !v->init_sec_data_len) {
1748  return avio_seek(v->input, offset, whence);
1749  }
1750 
1751  return AVERROR(ENOSYS);
1752 }
1753 
1754 static int read_data(void *opaque, uint8_t *buf, int buf_size)
1755 {
1756  int ret = 0;
1757  struct representation *v = opaque;
1758  DASHContext *c = v->parent->priv_data;
1759 
1760 restart:
1761  if (!v->input) {
1762  free_fragment(&v->cur_seg);
1763  v->cur_seg = get_current_fragment(v);
1764  if (!v->cur_seg) {
1765  ret = AVERROR_EOF;
1766  goto end;
1767  }
1768 
1769  /* load/update Media Initialization Section, if any */
1770  ret = update_init_section(v);
1771  if (ret)
1772  goto end;
1773 
1774  ret = open_input(c, v, v->cur_seg);
1775  if (ret < 0) {
1776  if (ff_check_interrupt(c->interrupt_callback)) {
1777  ret = AVERROR_EXIT;
1778  goto end;
1779  }
1780  av_log(v->parent, AV_LOG_WARNING, "Failed to open fragment of playlist\n");
1781  v->cur_seq_no++;
1782  goto restart;
1783  }
1784  }
1785 
1786  if (v->init_sec_buf_read_offset < v->init_sec_data_len) {
1787  /* Push init section out first before first actual fragment */
1788  int copy_size = FFMIN(v->init_sec_data_len - v->init_sec_buf_read_offset, buf_size);
1789  memcpy(buf, v->init_sec_buf, copy_size);
1790  v->init_sec_buf_read_offset += copy_size;
1791  ret = copy_size;
1792  goto end;
1793  }
1794 
1795  /* if v->cur_seg is NULL, fetch the current fragment and check it again */
1796  if (!v->cur_seg) {
1797  v->cur_seg = get_current_fragment(v);
1798  }
1799  if (!v->cur_seg) {
1800  ret = AVERROR_EOF;
1801  goto end;
1802  }
1803  ret = read_from_url(v, v->cur_seg, buf, buf_size);
1804  if (ret > 0)
1805  goto end;
1806 
1807  if (c->is_live || v->cur_seq_no < v->last_seq_no) {
1808  if (!v->is_restart_needed)
1809  v->cur_seq_no++;
1810  v->is_restart_needed = 1;
1811  }
1812 
1813 end:
1814  return ret;
1815 }
1816 
1817 static int save_avio_options(AVFormatContext *s)
1818 {
1819  DASHContext *c = s->priv_data;
1820  const char *opts[] = {
1821  "headers", "user_agent", "cookies", "http_proxy", "referer", "rw_timeout", "icy", NULL };
1822  const char **opt = opts;
1823  uint8_t *buf = NULL;
1824  int ret = 0;
1825 
1826  while (*opt) {
1827  if (av_opt_get(s->pb, *opt, AV_OPT_SEARCH_CHILDREN, &buf) >= 0) {
1828  if (buf[0] != '\0') {
1829  ret = av_dict_set(&c->avio_opts, *opt, buf, AV_DICT_DONT_STRDUP_VAL);
1830  if (ret < 0)
1831  return ret;
1832  } else {
1833  av_freep(&buf);
1834  }
1835  }
1836  opt++;
1837  }
1838 
1839  return ret;
1840 }
1841 
1842 static int nested_io_open(AVFormatContext *s, AVIOContext **pb, const char *url,
1843  int flags, AVDictionary **opts)
1844 {
1845  av_log(s, AV_LOG_ERROR,
1846  "A DASH playlist item '%s' referred to an external file '%s'. "
1847  "Opening this file was forbidden for security reasons\n",
1848  s->url, url);
1849  return AVERROR(EPERM);
1850 }
1851 
1852 static void close_demux_for_component(struct representation *pls)
1853 {
1854  /* note: the internal buffer could have changed */
1855  av_freep(&pls->pb.buffer);
1856  memset(&pls->pb, 0x00, sizeof(AVIOContext));
1857  pls->ctx->pb = NULL;
1858  avformat_close_input(&pls->ctx);
1859 }
1860 
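/* (Re)create the nested demuxer for one representation: set up a custom
 * AVIOContext backed by read_data()/seek_data(), probe the fragment data and
 * open the detected input format. */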
1861 static int reopen_demux_for_component(AVFormatContext *s, struct representation *pls)
1862 {
1863  DASHContext *c = s->priv_data;
1864  ff_const59 AVInputFormat *in_fmt = NULL;
1865  AVDictionary *in_fmt_opts = NULL;
1866  uint8_t *avio_ctx_buffer = NULL;
1867  int ret = 0, i;
1868 
1869  if (pls->ctx) {
1870  close_demux_for_component(pls);
1871  }
1872 
1873  if (ff_check_interrupt(&s->interrupt_callback)) {
1874  ret = AVERROR_EXIT;
1874  ret = AVERROR_EXIT;
1875  goto fail;
1876  }
1877 
1878  if (!(pls->ctx = avformat_alloc_context())) {
1879  ret = AVERROR(ENOMEM);
1880  goto fail;
1881  }
1882 
1883  avio_ctx_buffer = av_malloc(INITIAL_BUFFER_SIZE);
1884  if (!avio_ctx_buffer ) {
1885  ret = AVERROR(ENOMEM);
1886  avformat_free_context(pls->ctx);
1887  pls->ctx = NULL;
1888  goto fail;
1889  }
1890  ffio_init_context(&pls->pb, avio_ctx_buffer, INITIAL_BUFFER_SIZE, 0,
1891  pls, read_data, NULL, c->is_live ? NULL : seek_data);
1892  pls->pb.seekable = 0;
1893 
1894  if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
1895  goto fail;
1896 
1897  pls->ctx->flags = AVFMT_FLAG_CUSTOM_IO;
1898  pls->ctx->probesize = s->probesize > 0 ? s->probesize : 1024 * 4;
1899  pls->ctx->max_analyze_duration = s->max_analyze_duration > 0 ? s->max_analyze_duration : 4 * AV_TIME_BASE;
1900  pls->ctx->interrupt_callback = s->interrupt_callback;
1901  ret = av_probe_input_buffer(&pls->pb, &in_fmt, "", NULL, 0, 0);
1902  if (ret < 0) {
1903  av_log(s, AV_LOG_ERROR, "Error when loading first fragment of playlist\n");
1904  avformat_free_context(pls->ctx);
1905  pls->ctx = NULL;
1906  goto fail;
1907  }
1908 
1909  pls->ctx->pb = &pls->pb;
1910  pls->ctx->io_open = nested_io_open;
1911 
1912  // provide additional information from mpd if available
1913  ret = avformat_open_input(&pls->ctx, "", in_fmt, &in_fmt_opts); //pls->init_section->url
1914  av_dict_free(&in_fmt_opts);
1915  if (ret < 0)
1916  goto fail;
1917  if (pls->n_fragments) {
1918 #if FF_API_R_FRAME_RATE
1919  if (pls->framerate.den) {
1920  for (i = 0; i < pls->ctx->nb_streams; i++)
1921  pls->ctx->streams[i]->r_frame_rate = pls->framerate;
1922  }
1923 #endif
1924  ret = avformat_find_stream_info(pls->ctx, NULL);
1925  if (ret < 0)
1926  goto fail;
1927  }
1928 
1929 fail:
1930  return ret;
1931 }
1932 
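/* First-time setup of a representation: pick the current sequence number, open
 * the nested demuxer and mirror its streams into the outer context, copying
 * codec parameters, disposition and side data. */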
1933 static int open_demux_for_component(AVFormatContext *s, struct representation *pls)
1934 {
1935  int ret = 0;
1936  int i;
1937 
1938  pls->parent = s;
1939  pls->cur_seq_no = calc_cur_seg_no(s, pls);
1940 
1941  if (!pls->last_seq_no) {
1942  pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
1943  }
1944 
1945  ret = reopen_demux_for_component(s, pls);
1946  if (ret < 0) {
1947  goto fail;
1948  }
1949  for (i = 0; i < pls->ctx->nb_streams; i++) {
1950  AVStream *st = avformat_new_stream(s, NULL);
1951  AVStream *ist = pls->ctx->streams[i];
1952  if (!st) {
1953  ret = AVERROR(ENOMEM);
1954  goto fail;
1955  }
1956  st->id = i;
1957  avcodec_parameters_copy(st->codecpar, ist->codecpar);
1958  avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
1959 
1960  // copy disposition
1961  st->disposition = ist->disposition;
1962 
1963  // copy side data
1964  for (int i = 0; i < ist->nb_side_data; i++) {
1965  const AVPacketSideData *sd_src = &ist->side_data[i];
1966  uint8_t *dst_data;
1967 
1968  dst_data = av_stream_new_side_data(st, sd_src->type, sd_src->size);
1969  if (!dst_data)
1970  return AVERROR(ENOMEM);
1971  memcpy(dst_data, sd_src->data, sd_src->size);
1972  }
1973  }
1974 
1975  return 0;
1976 fail:
1977  return ret;
1978 }
1979 
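/* Return 1 if every representation in the list points at the same
 * initialization section (same URL, offset and size), 0 otherwise. */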
1980 static int is_common_init_section_exist(struct representation **pls, int n_pls)
1981 {
1982  struct fragment *first_init_section = pls[0]->init_section;
1983  char *url =NULL;
1984  int64_t url_offset = -1;
1985  int64_t size = -1;
1986  int i = 0;
1987 
1988  if (first_init_section == NULL || n_pls == 0)
1989  return 0;
1990 
1991  url = first_init_section->url;
1992  url_offset = first_init_section->url_offset;
1993  size = pls[0]->init_section->size;
1994  for (i=0;i<n_pls;i++) {
1995  if (av_strcasecmp(pls[i]->init_section->url,url) || pls[i]->init_section->url_offset != url_offset || pls[i]->init_section->size != size) {
1996  return 0;
1997  }
1998  }
1999  return 1;
2000 }
2001 
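/* Duplicate the already downloaded init-section buffer from one representation
 * into another, so a shared section is fetched only once. */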
2002 static int copy_init_section(struct representation *rep_dest, struct representation *rep_src)
2003 {
2004  rep_dest->init_sec_buf = av_mallocz(rep_src->init_sec_buf_size);
2005  if (!rep_dest->init_sec_buf) {
2006  av_log(rep_dest->ctx, AV_LOG_WARNING, "Cannot alloc memory for init_sec_buf\n");
2007  return AVERROR(ENOMEM);
2008  }
2009  memcpy(rep_dest->init_sec_buf, rep_src->init_sec_buf, rep_src->init_sec_data_len);
2010  rep_dest->init_sec_buf_size = rep_src->init_sec_buf_size;
2011  rep_dest->init_sec_data_len = rep_src->init_sec_data_len;
2012  rep_dest->cur_timestamp = rep_src->cur_timestamp;
2013 
2014  return 0;
2015 }
2016 
2017 static int dash_close(AVFormatContext *s);
2018 
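/* read_header callback: parse the MPD manifest, open a nested demuxer for every
 * selected video/audio/subtitle representation and expose them as streams of a
 * single program with variant_bitrate/id/language metadata. */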
2019 static int dash_read_header(AVFormatContext *s)
2020 {
2021  DASHContext *c = s->priv_data;
2022  struct representation *rep;
2023  AVProgram *program;
2024  int ret = 0;
2025  int stream_index = 0;
2026  int i;
2027 
2028  c->interrupt_callback = &s->interrupt_callback;
2029 
2030  if ((ret = save_avio_options(s)) < 0)
2031  goto fail;
2032 
2033  if ((ret = parse_manifest(s, s->url, s->pb)) < 0)
2034  goto fail;
2035 
2036  /* If this isn't a live stream, fill the total duration of the
2037  * stream. */
2038  if (!c->is_live) {
2039  s->duration = (int64_t) c->media_presentation_duration * AV_TIME_BASE;
2040  } else {
2041  av_dict_set(&c->avio_opts, "seekable", "0", 0);
2042  }
2043 
2044  if(c->n_videos)
2045  c->is_init_section_common_video = is_common_init_section_exist(c->videos, c->n_videos);
2046 
2047  /* Open the demuxer for video and audio components if available */
2048  for (i = 0; i < c->n_videos; i++) {
2049  rep = c->videos[i];
2050  if (i > 0 && c->is_init_section_common_video) {
2051  ret = copy_init_section(rep, c->videos[0]);
2052  if (ret < 0)
2053  goto fail;
2054  }
2055  ret = open_demux_for_component(s, rep);
2056 
2057  if (ret)
2058  goto fail;
2059  rep->stream_index = stream_index;
2060  ++stream_index;
2061  }
2062 
2063  if(c->n_audios)
2064  c->is_init_section_common_audio = is_common_init_section_exist(c->audios, c->n_audios);
2065 
2066  for (i = 0; i < c->n_audios; i++) {
2067  rep = c->audios[i];
2068  if (i > 0 && c->is_init_section_common_audio) {
2069  ret = copy_init_section(rep, c->audios[0]);
2070  if (ret < 0)
2071  goto fail;
2072  }
2073  ret = open_demux_for_component(s, rep);
2074 
2075  if (ret)
2076  goto fail;
2077  rep->stream_index = stream_index;
2078  ++stream_index;
2079  }
2080 
2081  if (c->n_subtitles)
2082  c->is_init_section_common_audio = is_common_init_section_exist(c->subtitles, c->n_subtitles);
2083 
2084  for (i = 0; i < c->n_subtitles; i++) {
2085  rep = c->subtitles[i];
2086  if (i > 0 && c->is_init_section_common_audio) {
2087  ret = copy_init_section(rep, c->subtitles[0]);
2088  if (ret < 0)
2089  goto fail;
2090  }
2091  ret = open_demux_for_component(s, rep);
2092 
2093  if (ret)
2094  goto fail;
2095  rep->stream_index = stream_index;
2096  ++stream_index;
2097  }
2098 
2099  if (!stream_index) {
2100  ret = AVERROR_INVALIDDATA;
2101  goto fail;
2102  }
2103 
2104  /* Create a program */
2105  program = av_new_program(s, 0);
2106  if (!program) {
2107  ret = AVERROR(ENOMEM);
2108  goto fail;
2109  }
2110 
2111  for (i = 0; i < c->n_videos; i++) {
2112  rep = c->videos[i];
2113  av_program_add_stream_index(s, 0, rep->stream_index);
2114  rep->assoc_stream = s->streams[rep->stream_index];
2115  if (rep->bandwidth > 0)
2116  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2117  if (rep->id[0])
2118  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2119  }
2120  for (i = 0; i < c->n_audios; i++) {
2121  rep = c->audios[i];
2122  av_program_add_stream_index(s, 0, rep->stream_index);
2123  rep->assoc_stream = s->streams[rep->stream_index];
2124  if (rep->bandwidth > 0)
2125  av_dict_set_int(&rep->assoc_stream->metadata, "variant_bitrate", rep->bandwidth, 0);
2126  if (rep->id[0])
2127  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2128  if (rep->lang) {
2129  av_dict_set(&rep->assoc_stream->metadata, "language", rep->lang, 0);
2130  av_freep(&rep->lang);
2131  }
2132  }
2133  for (i = 0; i < c->n_subtitles; i++) {
2134  rep = c->subtitles[i];
2135  av_program_add_stream_index(s, 0, rep->stream_index);
2136  rep->assoc_stream = s->streams[rep->stream_index];
2137  if (rep->id[0])
2138  av_dict_set(&rep->assoc_stream->metadata, "id", rep->id, 0);
2139  if (rep->lang) {
2140  av_dict_set(&rep->assoc_stream->metadata, "language", rep->lang, 0);
2141  av_freep(&rep->lang);
2142  }
2143  }
2144 
2145  return 0;
2146 fail:
2147  dash_close(s);
2148  return ret;
2149 }
2150 
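/* Open or close nested demuxers according to the user's discard flags, keeping
 * a newly enabled representation in sync with the furthest sequence number. */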
2151 static void recheck_discard_flags(AVFormatContext *s, struct representation **p, int n)
2152 {
2153  int i, j;
2154 
2155  for (i = 0; i < n; i++) {
2156  struct representation *pls = p[i];
2157  int needed = !pls->assoc_stream || pls->assoc_stream->discard < AVDISCARD_ALL;
2158 
2159  if (needed && !pls->ctx) {
2160  pls->cur_seg_offset = 0;
2161  pls->init_sec_buf_read_offset = 0;
2162  /* Catch up */
2163  for (j = 0; j < n; j++) {
2164  pls->cur_seq_no = FFMAX(pls->cur_seq_no, p[j]->cur_seq_no);
2165  }
2166  reopen_demux_for_component(s, pls);
2167  av_log(s, AV_LOG_INFO, "Now receiving stream_index %d\n", pls->stream_index);
2168  } else if (!needed && pls->ctx) {
2169  close_demux_for_component(pls);
2170  ff_format_io_close(pls->parent, &pls->input);
2171  av_log(s, AV_LOG_INFO, "No longer receiving stream_index %d\n", pls->stream_index);
2172  }
2173  }
2174 }
2175 
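/* read_packet callback: pick the representation with the lowest current
 * timestamp, read the next packet from its nested demuxer and remap it to the
 * outer stream index, restarting the nested demuxer when needed. */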
2176 static int dash_read_packet(AVFormatContext *s, AVPacket *pkt)
2177 {
2178  DASHContext *c = s->priv_data;
2179  int ret = 0, i;
2180  int64_t mints = 0;
2181  struct representation *cur = NULL;
2182  struct representation *rep = NULL;
2183 
2184  recheck_discard_flags(s, c->videos, c->n_videos);
2185  recheck_discard_flags(s, c->audios, c->n_audios);
2186  recheck_discard_flags(s, c->subtitles, c->n_subtitles);
2187 
2188  for (i = 0; i < c->n_videos; i++) {
2189  rep = c->videos[i];
2190  if (!rep->ctx)
2191  continue;
2192  if (!cur || rep->cur_timestamp < mints) {
2193  cur = rep;
2194  mints = rep->cur_timestamp;
2195  }
2196  }
2197  for (i = 0; i < c->n_audios; i++) {
2198  rep = c->audios[i];
2199  if (!rep->ctx)
2200  continue;
2201  if (!cur || rep->cur_timestamp < mints) {
2202  cur = rep;
2203  mints = rep->cur_timestamp;
2204  }
2205  }
2206 
2207  for (i = 0; i < c->n_subtitles; i++) {
2208  rep = c->subtitles[i];
2209  if (!rep->ctx)
2210  continue;
2211  if (!cur || rep->cur_timestamp < mints) {
2212  cur = rep;
2213  mints = rep->cur_timestamp;
2214  }
2215  }
2216 
2217  if (!cur) {
2218  return AVERROR_INVALIDDATA;
2219  }
2220  while (!ff_check_interrupt(c->interrupt_callback) && !ret) {
2221  ret = av_read_frame(cur->ctx, pkt);
2222  if (ret >= 0) {
2223  /* If we got a packet, return it */
2224  cur->cur_timestamp = av_rescale(pkt->pts, (int64_t)cur->ctx->streams[0]->time_base.num * 90000, cur->ctx->streams[0]->time_base.den);
2225  pkt->stream_index = cur->stream_index;
2226  return 0;
2227  }
2228  if (cur->is_restart_needed) {
2229  cur->cur_seg_offset = 0;
2230  cur->init_sec_buf_read_offset = 0;
2231  ff_format_io_close(cur->parent, &cur->input);
2232  ret = reopen_demux_for_component(s, cur);
2233  cur->is_restart_needed = 0;
2234  }
2235  }
2236  return AVERROR_EOF;
2237 }
2238 
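/* read_close callback: free all representation lists and cached AVIO options. */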
2239 static int dash_close(AVFormatContext *s)
2240 {
2241  DASHContext *c = s->priv_data;
2242  free_audio_list(c);
2243  free_video_list(c);
2244  free_subtitle_list(c);
2245  av_dict_free(&c->avio_opts);
2246  av_freep(&c->base_url);
2247  return 0;
2248 }
2249 
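/* Seek one representation to seek_pos_msec: either seek directly inside a
 * single-fragment stream, or pick the closest fragment from the SegmentTimeline
 * or the fixed fragment duration and reopen the nested demuxer (unless this is
 * a dry run for a discarded stream). */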
2250 static int dash_seek(AVFormatContext *s, struct representation *pls, int64_t seek_pos_msec, int flags, int dry_run)
2251 {
2252  int ret = 0;
2253  int i = 0;
2254  int j = 0;
2255  int64_t duration = 0;
2256 
2257  av_log(pls->parent, AV_LOG_VERBOSE, "DASH seek pos[%"PRId64"ms] %s\n",
2258  seek_pos_msec, dry_run ? " (dry)" : "");
2259 
2260  // single fragment mode
2261  if (pls->n_fragments == 1) {
2262  pls->cur_timestamp = 0;
2263  pls->cur_seg_offset = 0;
2264  if (dry_run)
2265  return 0;
2266  ff_read_frame_flush(pls->ctx);
2267  return av_seek_frame(pls->ctx, -1, seek_pos_msec * 1000, flags);
2268  }
2269 
2270  ff_format_io_close(pls->parent, &pls->input);
2271 
2272  // find the nearest fragment
2273  if (pls->n_timelines > 0 && pls->fragment_timescale > 0) {
2274  int64_t num = pls->first_seq_no;
2275  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline start n_timelines[%d] "
2276  "last_seq_no[%"PRId64"].\n",
2277  (int)pls->n_timelines, (int64_t)pls->last_seq_no);
2278  for (i = 0; i < pls->n_timelines; i++) {
2279  if (pls->timelines[i]->starttime > 0) {
2280  duration = pls->timelines[i]->starttime;
2281  }
2282  duration += pls->timelines[i]->duration;
2283  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2284  goto set_seq_num;
2285  }
2286  for (j = 0; j < pls->timelines[i]->repeat; j++) {
2287  duration += pls->timelines[i]->duration;
2288  num++;
2289  if (seek_pos_msec < ((duration * 1000) / pls->fragment_timescale)) {
2290  goto set_seq_num;
2291  }
2292  }
2293  num++;
2294  }
2295 
2296 set_seq_num:
2297  pls->cur_seq_no = num > pls->last_seq_no ? pls->last_seq_no : num;
2298  av_log(pls->parent, AV_LOG_VERBOSE, "dash_seek with SegmentTimeline end cur_seq_no[%"PRId64"].\n",
2299  (int64_t)pls->cur_seq_no);
2300  } else if (pls->fragment_duration > 0) {
2301  pls->cur_seq_no = pls->first_seq_no + ((seek_pos_msec * pls->fragment_timescale) / pls->fragment_duration) / 1000;
2302  } else {
2303  av_log(pls->parent, AV_LOG_ERROR, "dash_seek missing timeline or fragment_duration\n");
2304  pls->cur_seq_no = pls->first_seq_no;
2305  }
2306  pls->cur_timestamp = 0;
2307  pls->cur_seg_offset = 0;
2308  pls->init_sec_buf_read_offset = 0;
2309  ret = dry_run ? 0 : reopen_demux_for_component(s, pls);
2310 
2311  return ret;
2312 }
2313 
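/* read_seek callback: convert the timestamp to milliseconds and apply the seek
 * to every representation; discarded streams are seeked with dry_run=1. */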
2314 static int dash_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2315 {
2316  int ret = 0, i;
2317  DASHContext *c = s->priv_data;
2318  int64_t seek_pos_msec = av_rescale_rnd(timestamp, 1000,
2319  s->streams[stream_index]->time_base.den,
2320  flags & AVSEEK_FLAG_BACKWARD ?
2321  AV_ROUND_DOWN : AV_ROUND_UP);
2322  if ((flags & AVSEEK_FLAG_BYTE) || c->is_live)
2323  return AVERROR(ENOSYS);
2324 
2325  /* Seek in discarded streams with dry_run=1 to avoid reopening them */
2326  for (i = 0; i < c->n_videos; i++) {
2327  if (!ret)
2328  ret = dash_seek(s, c->videos[i], seek_pos_msec, flags, !c->videos[i]->ctx);
2329  }
2330  for (i = 0; i < c->n_audios; i++) {
2331  if (!ret)
2332  ret = dash_seek(s, c->audios[i], seek_pos_msec, flags, !c->audios[i]->ctx);
2333  }
2334  for (i = 0; i < c->n_subtitles; i++) {
2335  if (!ret)
2336  ret = dash_seek(s, c->subtitles[i], seek_pos_msec, flags, !c->subtitles[i]->ctx);
2337  }
2338 
2339  return ret;
2340 }
2341 
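/* Probe for an MPD manifest: require an "<MPD" tag and score known DASH
 * profile strings at maximum confidence. */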
2342 static int dash_probe(const AVProbeData *p)
2343 {
2344  if (!av_stristr(p->buf, "<MPD"))
2345  return 0;
2346 
2347  if (av_stristr(p->buf, "dash:profile:isoff-on-demand:2011") ||
2348  av_stristr(p->buf, "dash:profile:isoff-live:2011") ||
2349  av_stristr(p->buf, "dash:profile:isoff-live:2012") ||
2350  av_stristr(p->buf, "dash:profile:isoff-main:2011") ||
2351  av_stristr(p->buf, "3GPP:PSS:profile:DASH1")) {
2352  return AVPROBE_SCORE_MAX;
2353  }
2354  if (av_stristr(p->buf, "dash:profile")) {
2355  return AVPROBE_SCORE_MAX;
2356  }
2357 
2358  return 0;
2359 }
2360 
2361 #define OFFSET(x) offsetof(DASHContext, x)
2362 #define FLAGS AV_OPT_FLAG_DECODING_PARAM
2363 static const AVOption dash_options[] = {
2364  {"allowed_extensions", "List of file extensions that dash is allowed to access",
2365  OFFSET(allowed_extensions), AV_OPT_TYPE_STRING,
2366  {.str = "aac,m4a,m4s,m4v,mov,mp4,webm,ts"},
2367  INT_MIN, INT_MAX, FLAGS},
2368  {NULL}
2369 };
2370 
2371 static const AVClass dash_class = {
2372  .class_name = "dash",
2373  .item_name = av_default_item_name,
2374  .option = dash_options,
2375  .version = LIBAVUTIL_VERSION_INT,
2376 };
2377 
2378 AVInputFormat ff_dash_demuxer = {
2379  .name = "dash",
2380  .long_name = NULL_IF_CONFIG_SMALL("Dynamic Adaptive Streaming over HTTP"),
2381  .priv_class = &dash_class,
2382  .priv_data_size = sizeof(DASHContext),
2383  .read_probe = dash_probe,
2384  .read_header = dash_read_header,
2385  .read_packet = dash_read_packet,
2386  .read_close = dash_close,
2387  .read_seek = dash_read_seek,
2388  .flags = AVFMT_NO_BYTE_SEEK,
2389 };