ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* SDL audio buffer size, in samples. Should be small to have precise
71  A/V sync as SDL does not have hardware buffer fullness info. */
72 #define SDL_AUDIO_BUFFER_SIZE 1024
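/* Editor's note (illustrative, not in the original source): with a 44100 Hz
   output, a 1024-sample callback buffer corresponds to roughly
   1024 / 44100 ~ 23 ms of audio per SDL callback, which bounds how stale the
   "bytes still queued in SDL" estimate used for A/V sync can become. */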
73 
74 /* no AV sync correction is done if below the minimum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MIN 0.01
76 /* AV sync correction is done if above the maximum AV sync threshold */
77 #define AV_SYNC_THRESHOLD_MAX 0.1
78 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
79 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
80 /* no AV correction is done if too big error */
81 #define AV_NOSYNC_THRESHOLD 10.0
82 
83 /* maximum audio speed change to get correct sync */
84 #define SAMPLE_CORRECTION_PERCENT_MAX 10
85 
86 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
87 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
88 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
89 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
90 
91 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
92 #define AUDIO_DIFF_AVG_NB 20
93 
94 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
95 #define REFRESH_RATE 0.01
96 
97 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
98 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
99 #define SAMPLE_ARRAY_SIZE (8 * 65536)
100 
101 #define CURSOR_HIDE_DELAY 1000000
102 
103 static int64_t sws_flags = SWS_BICUBIC;
104 
105 typedef struct MyAVPacketList {
106  AVPacket pkt;
107  struct MyAVPacketList *next;
108  int serial;
109 } MyAVPacketList;
110 
111 typedef struct PacketQueue {
112  MyAVPacketList *first_pkt, *last_pkt;
113  int nb_packets;
114  int size;
115  int abort_request;
116  int serial;
117  SDL_mutex *mutex;
118  SDL_cond *cond;
119 } PacketQueue;
120 
121 #define VIDEO_PICTURE_QUEUE_SIZE 3
122 #define SUBPICTURE_QUEUE_SIZE 4
123 
124 typedef struct VideoPicture {
125  double pts; // presentation timestamp for this picture
126  double duration; // estimated duration based on frame rate
127  int64_t pos; // byte position in file
128  SDL_Overlay *bmp;
129  int width, height; /* source height & width */
130  int allocated;
131  int reallocate;
132  int serial;
133 
134  AVRational sar;
135 } VideoPicture;
136 
137 typedef struct SubPicture {
138  double pts; /* presentation time stamp for this picture */
139  AVSubtitle sub;
140  int serial;
141 } SubPicture;
142 
143 typedef struct AudioParams {
144  int freq;
145  int channels;
146  int64_t channel_layout;
147  enum AVSampleFormat fmt;
148  int frame_size;
149  int bytes_per_sec;
150 } AudioParams;
151 
152 typedef struct Clock {
153  double pts; /* clock base */
154  double pts_drift; /* clock base minus time at which we updated the clock */
155  double last_updated;
156  double speed;
157  int serial; /* clock is based on a packet with this serial */
158  int paused;
159  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
160 } Clock;
161 
162 enum {
163  AV_SYNC_AUDIO_MASTER, /* default choice */
164  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
165  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
166 };
167 
168 typedef struct VideoState {
169  SDL_Thread *read_tid;
170  SDL_Thread *video_tid;
175  int paused;
178  int seek_req;
180  int64_t seek_pos;
181  int64_t seek_rel;
184  int realtime;
187 
191 
193 
195 
196  double audio_clock;
198  double audio_diff_cum; /* used for AV difference average computation */
208  unsigned int audio_buf_size; /* in bytes */
209  unsigned int audio_buf1_size;
210  int audio_buf_index; /* in bytes */
218 #if CONFIG_AVFILTER
219  struct AudioParams audio_filter_src;
220 #endif
227 
228  enum ShowMode {
230  } show_mode;
237  int xpos;
239 
240  SDL_Thread *subtitle_tid;
246  SDL_mutex *subpq_mutex;
247  SDL_cond *subpq_cond;
248 
249  double frame_timer;
255  int64_t video_current_pos; // current displayed file pos
256  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
259  SDL_mutex *pictq_mutex;
260  SDL_cond *pictq_cond;
261 #if !CONFIG_AVFILTER
262  struct SwsContext *img_convert_ctx;
263 #endif
265 
266  char filename[1024];
268  int step;
269 
270 #if CONFIG_AVFILTER
271  AVFilterContext *in_video_filter; // the first filter in the video chain
272  AVFilterContext *out_video_filter; // the last filter in the video chain
273  AVFilterContext *in_audio_filter; // the first filter in the audio chain
274  AVFilterContext *out_audio_filter; // the last filter in the audio chain
275  AVFilterGraph *agraph; // audio filter graph
276 #endif
277 
279 
281 } VideoState;
282 
283 /* options specified by the user */
285 static const char *input_filename;
286 static const char *window_title;
287 static int fs_screen_width;
288 static int fs_screen_height;
289 static int default_width = 640;
290 static int default_height = 480;
291 static int screen_width = 0;
292 static int screen_height = 0;
293 static int audio_disable;
294 static int video_disable;
295 static int subtitle_disable;
296 static int wanted_stream[AVMEDIA_TYPE_NB] = {
297  [AVMEDIA_TYPE_AUDIO] = -1,
298  [AVMEDIA_TYPE_VIDEO] = -1,
299  [AVMEDIA_TYPE_SUBTITLE] = -1,
300 };
301 static int seek_by_bytes = -1;
302 static int display_disable;
303 static int show_status = 1;
304 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
305 static int64_t start_time = AV_NOPTS_VALUE;
306 static int64_t duration = AV_NOPTS_VALUE;
307 static int workaround_bugs = 1;
308 static int fast = 0;
309 static int genpts = 0;
310 static int lowres = 0;
311 static int error_concealment = 3;
312 static int decoder_reorder_pts = -1;
313 static int autoexit;
314 static int exit_on_keydown;
315 static int exit_on_mousedown;
316 static int loop = 1;
317 static int framedrop = -1;
318 static int infinite_buffer = -1;
319 static enum ShowMode show_mode = SHOW_MODE_NONE;
320 static const char *audio_codec_name;
321 static const char *subtitle_codec_name;
322 static const char *video_codec_name;
323 double rdftspeed = 0.02;
324 static int64_t cursor_last_shown;
325 static int cursor_hidden = 0;
326 #if CONFIG_AVFILTER
327 static char *vfilters = NULL;
328 static char *afilters = NULL;
329 #endif
330 
331 /* current context */
332 static int is_full_screen;
333 static int64_t audio_callback_time;
334 
335 static AVPacket flush_pkt;
336 
337 #define FF_ALLOC_EVENT (SDL_USEREVENT)
338 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
339 
340 static SDL_Surface *screen;
341 
342 static inline
343 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
344  enum AVSampleFormat fmt2, int64_t channel_count2)
345 {
346  /* If channel count == 1, planar and non-planar formats are the same */
347  if (channel_count1 == 1 && channel_count2 == 1)
348  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
349  else
350  return channel_count1 != channel_count2 || fmt1 != fmt2;
351 }
352 
353 static inline
354 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
355 {
356  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
357  return channel_layout;
358  else
359  return 0;
360 }
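/* Editor's note (not in the original source): get_valid_channel_layout()
   drops a layout that disagrees with the actual channel count, for example a
   stereo layout reported for a 6-channel stream; returning 0 lets the caller
   fall back to a default layout for that channel count instead of mapping
   channels incorrectly. */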
361 
362 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
363 {
364  MyAVPacketList *pkt1;
365 
366  if (q->abort_request)
367  return -1;
368 
369  pkt1 = av_malloc(sizeof(MyAVPacketList));
370  if (!pkt1)
371  return -1;
372  pkt1->pkt = *pkt;
373  pkt1->next = NULL;
374  if (pkt == &flush_pkt)
375  q->serial++;
376  pkt1->serial = q->serial;
377 
378  if (!q->last_pkt)
379  q->first_pkt = pkt1;
380  else
381  q->last_pkt->next = pkt1;
382  q->last_pkt = pkt1;
383  q->nb_packets++;
384  q->size += pkt1->pkt.size + sizeof(*pkt1);
385  /* XXX: should duplicate packet data in DV case */
386  SDL_CondSignal(q->cond);
387  return 0;
388 }
389 
390 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
391 {
392  int ret;
393 
394  /* duplicate the packet */
395  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
396  return -1;
397 
398  SDL_LockMutex(q->mutex);
399  ret = packet_queue_put_private(q, pkt);
400  SDL_UnlockMutex(q->mutex);
401 
402  if (pkt != &flush_pkt && ret < 0)
403  av_free_packet(pkt);
404 
405  return ret;
406 }
407 
408 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
409 {
410  AVPacket pkt1, *pkt = &pkt1;
411  av_init_packet(pkt);
412  pkt->data = NULL;
413  pkt->size = 0;
414  pkt->stream_index = stream_index;
415  return packet_queue_put(q, pkt);
416 }
417 
418 /* packet queue handling */
419 static void packet_queue_init(PacketQueue *q)
420 {
421  memset(q, 0, sizeof(PacketQueue));
422  q->mutex = SDL_CreateMutex();
423  q->cond = SDL_CreateCond();
424  q->abort_request = 1;
425 }
426 
427 static void packet_queue_flush(PacketQueue *q)
428 {
429  MyAVPacketList *pkt, *pkt1;
430 
431  SDL_LockMutex(q->mutex);
432  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
433  pkt1 = pkt->next;
434  av_free_packet(&pkt->pkt);
435  av_freep(&pkt);
436  }
437  q->last_pkt = NULL;
438  q->first_pkt = NULL;
439  q->nb_packets = 0;
440  q->size = 0;
441  SDL_UnlockMutex(q->mutex);
442 }
443 
444 static void packet_queue_destroy(PacketQueue *q)
445 {
446  packet_queue_flush(q);
447  SDL_DestroyMutex(q->mutex);
448  SDL_DestroyCond(q->cond);
449 }
450 
451 static void packet_queue_abort(PacketQueue *q)
452 {
453  SDL_LockMutex(q->mutex);
454 
455  q->abort_request = 1;
456 
457  SDL_CondSignal(q->cond);
458 
459  SDL_UnlockMutex(q->mutex);
460 }
461 
462 static void packet_queue_start(PacketQueue *q)
463 {
464  SDL_LockMutex(q->mutex);
465  q->abort_request = 0;
466  packet_queue_put_private(q, &flush_pkt);
467  SDL_UnlockMutex(q->mutex);
468 }
469 
470 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
471 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
472 {
473  MyAVPacketList *pkt1;
474  int ret;
475 
476  SDL_LockMutex(q->mutex);
477 
478  for (;;) {
479  if (q->abort_request) {
480  ret = -1;
481  break;
482  }
483 
484  pkt1 = q->first_pkt;
485  if (pkt1) {
486  q->first_pkt = pkt1->next;
487  if (!q->first_pkt)
488  q->last_pkt = NULL;
489  q->nb_packets--;
490  q->size -= pkt1->pkt.size + sizeof(*pkt1);
491  *pkt = pkt1->pkt;
492  if (serial)
493  *serial = pkt1->serial;
494  av_free(pkt1);
495  ret = 1;
496  break;
497  } else if (!block) {
498  ret = 0;
499  break;
500  } else {
501  SDL_CondWait(q->cond, q->mutex);
502  }
503  }
504  SDL_UnlockMutex(q->mutex);
505  return ret;
506 }
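/* Editor's sketch (illustrative, not part of ffplay.c): how the queue API is
 * meant to be consumed. A decoder thread blocks in packet_queue_get() and
 * compares the returned serial against the serial it is decoding for, so
 * packets queued before a seek (which bumps q->serial via flush_pkt) can be
 * discarded; real consumers additionally check pkt.data == flush_pkt.data to
 * flush the codec. The function name and parameter are hypothetical. */
#if 0
static void example_drain(PacketQueue *q, int expected_serial)
{
    AVPacket pkt;
    int serial;
    while (packet_queue_get(q, &pkt, 1, &serial) > 0) {
        if (serial != expected_serial) { /* stale packet from before a seek */
            av_free_packet(&pkt);
            continue;
        }
        /* ... decode pkt ... */
        av_free_packet(&pkt);
    }
}
#endif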
507 
508 static inline void fill_rectangle(SDL_Surface *screen,
509  int x, int y, int w, int h, int color, int update)
510 {
511  SDL_Rect rect;
512  rect.x = x;
513  rect.y = y;
514  rect.w = w;
515  rect.h = h;
516  SDL_FillRect(screen, &rect, color);
517  if (update && w > 0 && h > 0)
518  SDL_UpdateRect(screen, x, y, w, h);
519 }
520 
521 /* draw only the border of a rectangle */
522 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
523 {
524  int w1, w2, h1, h2;
525 
526  /* fill the background */
527  w1 = x;
528  if (w1 < 0)
529  w1 = 0;
530  w2 = width - (x + w);
531  if (w2 < 0)
532  w2 = 0;
533  h1 = y;
534  if (h1 < 0)
535  h1 = 0;
536  h2 = height - (y + h);
537  if (h2 < 0)
538  h2 = 0;
539  fill_rectangle(screen,
540  xleft, ytop,
541  w1, height,
542  color, update);
543  fill_rectangle(screen,
544  xleft + width - w2, ytop,
545  w2, height,
546  color, update);
547  fill_rectangle(screen,
548  xleft + w1, ytop,
549  width - w1 - w2, h1,
550  color, update);
551  fill_rectangle(screen,
552  xleft + w1, ytop + height - h2,
553  width - w1 - w2, h2,
554  color, update);
555 }
556 
557 #define ALPHA_BLEND(a, oldp, newp, s)\
558 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
559 
560 #define RGBA_IN(r, g, b, a, s)\
561 {\
562  unsigned int v = ((const uint32_t *)(s))[0];\
563  a = (v >> 24) & 0xff;\
564  r = (v >> 16) & 0xff;\
565  g = (v >> 8) & 0xff;\
566  b = v & 0xff;\
567 }
568 
569 #define YUVA_IN(y, u, v, a, s, pal)\
570 {\
571  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
572  a = (val >> 24) & 0xff;\
573  y = (val >> 16) & 0xff;\
574  u = (val >> 8) & 0xff;\
575  v = val & 0xff;\
576 }
577 
578 #define YUVA_OUT(d, y, u, v, a)\
579 {\
580  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
581 }
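/* Worked example (editor's note, not in the original source): blending a
 * fully opaque subtitle pixel (a = 255) with ALPHA_BLEND(a, oldp, newp, 0)
 * gives ((oldp * 0) + (newp * 255)) / 255 = newp, so the subtitle replaces
 * the video sample, while a = 0 leaves oldp untouched and intermediate
 * alphas mix the two linearly. The shift s compensates for chroma and alpha
 * values that were summed over 2 or 4 samples before blending. */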
582 
583 
584 #define BPP 1
585 
586 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
587 {
588  int wrap, wrap3, width2, skip2;
589  int y, u, v, a, u1, v1, a1, w, h;
590  uint8_t *lum, *cb, *cr;
591  const uint8_t *p;
592  const uint32_t *pal;
593  int dstx, dsty, dstw, dsth;
594 
595  dstw = av_clip(rect->w, 0, imgw);
596  dsth = av_clip(rect->h, 0, imgh);
597  dstx = av_clip(rect->x, 0, imgw - dstw);
598  dsty = av_clip(rect->y, 0, imgh - dsth);
599  lum = dst->data[0] + dsty * dst->linesize[0];
600  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
601  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
602 
603  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
604  skip2 = dstx >> 1;
605  wrap = dst->linesize[0];
606  wrap3 = rect->pict.linesize[0];
607  p = rect->pict.data[0];
608  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
609 
610  if (dsty & 1) {
611  lum += dstx;
612  cb += skip2;
613  cr += skip2;
614 
615  if (dstx & 1) {
616  YUVA_IN(y, u, v, a, p, pal);
617  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
618  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
619  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
620  cb++;
621  cr++;
622  lum++;
623  p += BPP;
624  }
625  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
626  YUVA_IN(y, u, v, a, p, pal);
627  u1 = u;
628  v1 = v;
629  a1 = a;
630  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631 
632  YUVA_IN(y, u, v, a, p + BPP, pal);
633  u1 += u;
634  v1 += v;
635  a1 += a;
636  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
637  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
638  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
639  cb++;
640  cr++;
641  p += 2 * BPP;
642  lum += 2;
643  }
644  if (w) {
645  YUVA_IN(y, u, v, a, p, pal);
646  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
647  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
648  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
649  p++;
650  lum++;
651  }
652  p += wrap3 - dstw * BPP;
653  lum += wrap - dstw - dstx;
654  cb += dst->linesize[1] - width2 - skip2;
655  cr += dst->linesize[2] - width2 - skip2;
656  }
657  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
658  lum += dstx;
659  cb += skip2;
660  cr += skip2;
661 
662  if (dstx & 1) {
663  YUVA_IN(y, u, v, a, p, pal);
664  u1 = u;
665  v1 = v;
666  a1 = a;
667  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
668  p += wrap3;
669  lum += wrap;
670  YUVA_IN(y, u, v, a, p, pal);
671  u1 += u;
672  v1 += v;
673  a1 += a;
674  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
675  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
676  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
677  cb++;
678  cr++;
679  p += -wrap3 + BPP;
680  lum += -wrap + 1;
681  }
682  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
683  YUVA_IN(y, u, v, a, p, pal);
684  u1 = u;
685  v1 = v;
686  a1 = a;
687  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
688 
689  YUVA_IN(y, u, v, a, p + BPP, pal);
690  u1 += u;
691  v1 += v;
692  a1 += a;
693  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
694  p += wrap3;
695  lum += wrap;
696 
697  YUVA_IN(y, u, v, a, p, pal);
698  u1 += u;
699  v1 += v;
700  a1 += a;
701  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
702 
703  YUVA_IN(y, u, v, a, p + BPP, pal);
704  u1 += u;
705  v1 += v;
706  a1 += a;
707  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
708 
709  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
710  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
711 
712  cb++;
713  cr++;
714  p += -wrap3 + 2 * BPP;
715  lum += -wrap + 2;
716  }
717  if (w) {
718  YUVA_IN(y, u, v, a, p, pal);
719  u1 = u;
720  v1 = v;
721  a1 = a;
722  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
723  p += wrap3;
724  lum += wrap;
725  YUVA_IN(y, u, v, a, p, pal);
726  u1 += u;
727  v1 += v;
728  a1 += a;
729  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
730  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
731  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
732  cb++;
733  cr++;
734  p += -wrap3 + BPP;
735  lum += -wrap + 1;
736  }
737  p += wrap3 + (wrap3 - dstw * BPP);
738  lum += wrap + (wrap - dstw - dstx);
739  cb += dst->linesize[1] - width2 - skip2;
740  cr += dst->linesize[2] - width2 - skip2;
741  }
742  /* handle odd height */
743  if (h) {
744  lum += dstx;
745  cb += skip2;
746  cr += skip2;
747 
748  if (dstx & 1) {
749  YUVA_IN(y, u, v, a, p, pal);
750  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
751  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
752  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
753  cb++;
754  cr++;
755  lum++;
756  p += BPP;
757  }
758  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
759  YUVA_IN(y, u, v, a, p, pal);
760  u1 = u;
761  v1 = v;
762  a1 = a;
763  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
764 
765  YUVA_IN(y, u, v, a, p + BPP, pal);
766  u1 += u;
767  v1 += v;
768  a1 += a;
769  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
770  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
771  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
772  cb++;
773  cr++;
774  p += 2 * BPP;
775  lum += 2;
776  }
777  if (w) {
778  YUVA_IN(y, u, v, a, p, pal);
779  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
780  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
781  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
782  }
783  }
784 }
785 
786 static void free_picture(VideoPicture *vp)
787 {
788  if (vp->bmp) {
789  SDL_FreeYUVOverlay(vp->bmp);
790  vp->bmp = NULL;
791  }
792 }
793 
794 static void free_subpicture(SubPicture *sp)
795 {
796  avsubtitle_free(&sp->sub);
797 }
798 
799 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
800 {
801  float aspect_ratio;
802  int width, height, x, y;
803 
804  if (vp->sar.num == 0)
805  aspect_ratio = 0;
806  else
807  aspect_ratio = av_q2d(vp->sar);
808 
809  if (aspect_ratio <= 0.0)
810  aspect_ratio = 1.0;
811  aspect_ratio *= (float)vp->width / (float)vp->height;
812 
813  /* XXX: we suppose the screen has a 1.0 pixel ratio */
814  height = scr_height;
815  width = ((int)rint(height * aspect_ratio)) & ~1;
816  if (width > scr_width) {
817  width = scr_width;
818  height = ((int)rint(width / aspect_ratio)) & ~1;
819  }
820  x = (scr_width - width) / 2;
821  y = (scr_height - height) / 2;
822  rect->x = scr_xleft + x;
823  rect->y = scr_ytop + y;
824  rect->w = FFMAX(width, 1);
825  rect->h = FFMAX(height, 1);
826 }
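/* Example (editor's note, not in the original source): a 1920x1080 frame
 * with square pixels (sar = 1/1) shown in a 640x480 area yields
 * aspect_ratio = 1920/1080 ~ 1.778; height = 480 gives width ~ 853, which
 * exceeds 640, so width is clamped to 640 and height becomes
 * rint(640 / 1.778) & ~1 = 360, centered with x = 0 and y = 60. */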
827 
828 static void video_image_display(VideoState *is)
829 {
830  VideoPicture *vp;
831  SubPicture *sp;
832  AVPicture pict;
833  SDL_Rect rect;
834  int i;
835 
836  vp = &is->pictq[is->pictq_rindex];
837  if (vp->bmp) {
838  if (is->subtitle_st) {
839  if (is->subpq_size > 0) {
840  sp = &is->subpq[is->subpq_rindex];
841 
842  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
843  SDL_LockYUVOverlay (vp->bmp);
844 
845  pict.data[0] = vp->bmp->pixels[0];
846  pict.data[1] = vp->bmp->pixels[2];
847  pict.data[2] = vp->bmp->pixels[1];
848 
849  pict.linesize[0] = vp->bmp->pitches[0];
850  pict.linesize[1] = vp->bmp->pitches[2];
851  pict.linesize[2] = vp->bmp->pitches[1];
852 
853  for (i = 0; i < sp->sub.num_rects; i++)
854  blend_subrect(&pict, sp->sub.rects[i],
855  vp->bmp->w, vp->bmp->h);
856 
857  SDL_UnlockYUVOverlay (vp->bmp);
858  }
859  }
860  }
861 
862  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
863 
864  SDL_DisplayYUVOverlay(vp->bmp, &rect);
865 
866  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
867  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
868  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
869  is->last_display_rect = rect;
870  }
871  }
872 }
873 
874 static inline int compute_mod(int a, int b)
875 {
876  return a < 0 ? a%b + b : a%b;
877 }
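/* Example (editor's note, not in the original source): compute_mod(-3, 8)
 * returns (-3 % 8) + 8 = 5, i.e. a true mathematical modulo, which the
 * waveform display relies on when stepping backwards through the circular
 * sample_array. */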
878 
879 static void video_audio_display(VideoState *s)
880 {
881  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
882  int ch, channels, h, h2, bgcolor, fgcolor;
883  int64_t time_diff;
884  int rdft_bits, nb_freq;
885 
886  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
887  ;
888  nb_freq = 1 << (rdft_bits - 1);
889 
890  /* compute display index : center on currently output samples */
891  channels = s->audio_tgt.channels;
892  nb_display_channels = channels;
893  if (!s->paused) {
894  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
895  n = 2 * channels;
896  delay = s->audio_write_buf_size;
897  delay /= n;
898 
899  /* to be more precise, we take into account the time spent since
900  the last buffer computation */
901  if (audio_callback_time) {
902  time_diff = av_gettime() - audio_callback_time;
903  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
904  }
905 
906  delay += 2 * data_used;
907  if (delay < data_used)
908  delay = data_used;
909 
910  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
911  if (s->show_mode == SHOW_MODE_WAVES) {
912  h = INT_MIN;
913  for (i = 0; i < 1000; i += channels) {
914  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
915  int a = s->sample_array[idx];
916  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
917  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
918  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
919  int score = a - d;
920  if (h < score && (b ^ c) < 0) {
921  h = score;
922  i_start = idx;
923  }
924  }
925  }
926 
927  s->last_i_start = i_start;
928  } else {
929  i_start = s->last_i_start;
930  }
931 
932  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
933  if (s->show_mode == SHOW_MODE_WAVES) {
934  fill_rectangle(screen,
935  s->xleft, s->ytop, s->width, s->height,
936  bgcolor, 0);
937 
938  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
939 
940  /* total height for one channel */
941  h = s->height / nb_display_channels;
942  /* graph height / 2 */
943  h2 = (h * 9) / 20;
944  for (ch = 0; ch < nb_display_channels; ch++) {
945  i = i_start + ch;
946  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
947  for (x = 0; x < s->width; x++) {
948  y = (s->sample_array[i] * h2) >> 15;
949  if (y < 0) {
950  y = -y;
951  ys = y1 - y;
952  } else {
953  ys = y1;
954  }
955  fill_rectangle(screen,
956  s->xleft + x, ys, 1, y,
957  fgcolor, 0);
958  i += channels;
959  if (i >= SAMPLE_ARRAY_SIZE)
960  i -= SAMPLE_ARRAY_SIZE;
961  }
962  }
963 
964  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
965 
966  for (ch = 1; ch < nb_display_channels; ch++) {
967  y = s->ytop + ch * h;
968  fill_rectangle(screen,
969  s->xleft, y, s->width, 1,
970  fgcolor, 0);
971  }
972  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
973  } else {
974  nb_display_channels= FFMIN(nb_display_channels, 2);
975  if (rdft_bits != s->rdft_bits) {
976  av_rdft_end(s->rdft);
977  av_free(s->rdft_data);
978  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
979  s->rdft_bits = rdft_bits;
980  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
981  }
982  {
983  FFTSample *data[2];
984  for (ch = 0; ch < nb_display_channels; ch++) {
985  data[ch] = s->rdft_data + 2 * nb_freq * ch;
986  i = i_start + ch;
987  for (x = 0; x < 2 * nb_freq; x++) {
988  double w = (x-nb_freq) * (1.0 / nb_freq);
989  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
990  i += channels;
991  if (i >= SAMPLE_ARRAY_SIZE)
992  i -= SAMPLE_ARRAY_SIZE;
993  }
994  av_rdft_calc(s->rdft, data[ch]);
995  }
996  /* Least efficient way to do this, we should of course
997  * directly access it but it is more than fast enough. */
998  for (y = 0; y < s->height; y++) {
999  double w = 1 / sqrt(nb_freq);
1000  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1001  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1002  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1003  a = FFMIN(a, 255);
1004  b = FFMIN(b, 255);
1005  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1006 
1007  fill_rectangle(screen,
1008  s->xpos, s->height-y, 1, 1,
1009  fgcolor, 0);
1010  }
1011  }
1012  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1013  if (!s->paused)
1014  s->xpos++;
1015  if (s->xpos >= s->width)
1016  s->xpos= s->xleft;
1017  }
1018 }
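/* Editor's note (not in the original source): the (1.0 - w * w) factor
 * applied above is a Welch (parabolic) window over the 2*nb_freq input
 * samples, and av_rdft_calc() returns interleaved re/im pairs, which is why
 * frequency bin y reads data[ch][2*y] and data[ch][2*y + 1]; the nested
 * sqrt() calls compress the magnitude so quiet content stays visible in the
 * scrolling spectrogram column drawn at s->xpos. */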
1019 
1020 static void stream_close(VideoState *is)
1021 {
1022  int i;
1023  /* XXX: use a special url_shutdown call to abort parse cleanly */
1024  is->abort_request = 1;
1025  SDL_WaitThread(is->read_tid, NULL);
1026  packet_queue_destroy(&is->videoq);
1027  packet_queue_destroy(&is->audioq);
1028  packet_queue_destroy(&is->subtitleq);
1029 
1030  /* free all pictures */
1031  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1032  free_picture(&is->pictq[i]);
1033  for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1034  free_subpicture(&is->subpq[i]);
1035  SDL_DestroyMutex(is->pictq_mutex);
1036  SDL_DestroyCond(is->pictq_cond);
1037  SDL_DestroyMutex(is->subpq_mutex);
1038  SDL_DestroyCond(is->subpq_cond);
1039  SDL_DestroyCond(is->continue_read_thread);
1040 #if !CONFIG_AVFILTER
1041  sws_freeContext(is->img_convert_ctx);
1042 #endif
1043  av_free(is);
1044 }
1045 
1046 static void do_exit(VideoState *is)
1047 {
1048  if (is) {
1049  stream_close(is);
1050  }
1051  av_lockmgr_register(NULL);
1052  uninit_opts();
1053 #if CONFIG_AVFILTER
1054  av_freep(&vfilters);
1055 #endif
1056  avformat_network_deinit();
1057  if (show_status)
1058  printf("\n");
1059  SDL_Quit();
1060  av_log(NULL, AV_LOG_QUIET, "%s", "");
1061  exit(0);
1062 }
1063 
1064 static void sigterm_handler(int sig)
1065 {
1066  exit(123);
1067 }
1068 
1069 static void set_default_window_size(VideoPicture *vp)
1070 {
1071  SDL_Rect rect;
1072  calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1073  default_width = rect.w;
1074  default_height = rect.h;
1075 }
1076 
1077 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1078 {
1079  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1080  int w,h;
1081 
1082  if (is_full_screen) flags |= SDL_FULLSCREEN;
1083  else flags |= SDL_RESIZABLE;
1084 
1085  if (vp && vp->width)
1086  set_default_window_size(vp);
1087 
1088  if (is_full_screen && fs_screen_width) {
1089  w = fs_screen_width;
1090  h = fs_screen_height;
1091  } else if (!is_full_screen && screen_width) {
1092  w = screen_width;
1093  h = screen_height;
1094  } else {
1095  w = default_width;
1096  h = default_height;
1097  }
1098  w = FFMIN(16383, w);
1099  if (screen && is->width == screen->w && screen->w == w
1100  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1101  return 0;
1102  screen = SDL_SetVideoMode(w, h, 0, flags);
1103  if (!screen) {
1104  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1105  do_exit(is);
1106  }
1107  if (!window_title)
1108  window_title = input_filename;
1109  SDL_WM_SetCaption(window_title, window_title);
1110 
1111  is->width = screen->w;
1112  is->height = screen->h;
1113 
1114  return 0;
1115 }
1116 
1117 /* display the current picture, if any */
1118 static void video_display(VideoState *is)
1119 {
1120  if (!screen)
1121  video_open(is, 0, NULL);
1122  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1123  video_audio_display(is);
1124  else if (is->video_st)
1125  video_image_display(is);
1126 }
1127 
1128 static double get_clock(Clock *c)
1129 {
1130  if (*c->queue_serial != c->serial)
1131  return NAN;
1132  if (c->paused) {
1133  return c->pts;
1134  } else {
1135  double time = av_gettime() / 1000000.0;
1136  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1137  }
1138 }
1139 
1140 static void set_clock_at(Clock *c, double pts, int serial, double time)
1141 {
1142  c->pts = pts;
1143  c->last_updated = time;
1144  c->pts_drift = c->pts - time;
1145  c->serial = serial;
1146 }
1147 
1148 static void set_clock(Clock *c, double pts, int serial)
1149 {
1150  double time = av_gettime() / 1000000.0;
1151  set_clock_at(c, pts, serial, time);
1152 }
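/* Worked example (editor's note, not in the original source): set_clock_at()
 * stores pts_drift = pts - now, so a frame with pts 10.0 s clocked at wall
 * time 100.0 s gives pts_drift = -90.0. Half a second later get_clock()
 * returns -90.0 + 100.5 = 10.5 s when speed == 1.0; the extra
 * (time - last_updated) * (1.0 - speed) term only matters while the external
 * clock is being slewed away from real time. */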
1153 
1154 static void set_clock_speed(Clock *c, double speed)
1155 {
1156  set_clock(c, get_clock(c), c->serial);
1157  c->speed = speed;
1158 }
1159 
1160 static void init_clock(Clock *c, int *queue_serial)
1161 {
1162  c->speed = 1.0;
1163  c->paused = 0;
1164  c->queue_serial = queue_serial;
1165  set_clock(c, NAN, -1);
1166 }
1167 
1168 static void sync_clock_to_slave(Clock *c, Clock *slave)
1169 {
1170  double clock = get_clock(c);
1171  double slave_clock = get_clock(slave);
1172  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1173  set_clock(c, slave_clock, slave->serial);
1174 }
1175 
1176 static int get_master_sync_type(VideoState *is) {
1177  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1178  if (is->video_st)
1179  return AV_SYNC_VIDEO_MASTER;
1180  else
1181  return AV_SYNC_AUDIO_MASTER;
1182  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1183  if (is->audio_st)
1184  return AV_SYNC_AUDIO_MASTER;
1185  else
1186  return AV_SYNC_EXTERNAL_CLOCK;
1187  } else {
1188  return AV_SYNC_EXTERNAL_CLOCK;
1189  }
1190 }
1191 
1192 /* get the current master clock value */
1193 static double get_master_clock(VideoState *is)
1194 {
1195  double val;
1196 
1197  switch (get_master_sync_type(is)) {
1198  case AV_SYNC_VIDEO_MASTER:
1199  val = get_clock(&is->vidclk);
1200  break;
1201  case AV_SYNC_AUDIO_MASTER:
1202  val = get_clock(&is->audclk);
1203  break;
1204  default:
1205  val = get_clock(&is->extclk);
1206  break;
1207  }
1208  return val;
1209 }
1210 
1211 static void check_external_clock_speed(VideoState *is) {
1212  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1213  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1214  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1215  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1216  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1217  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1218  } else {
1219  double speed = is->extclk.speed;
1220  if (speed != 1.0)
1221  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1222  }
1223 }
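/* Editor's note (not in the original source): for realtime sources the
 * external clock speed is nudged by at most EXTERNAL_CLOCK_SPEED_STEP (0.1%)
 * per call: lowered towards EXTERNAL_CLOCK_SPEED_MIN when either packet
 * queue runs nearly dry, raised towards EXTERNAL_CLOCK_SPEED_MAX when both
 * are comfortably full, and eased back towards 1.0 otherwise, so the
 * playback rate tracks the delivery rate instead of stalling or overrunning. */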
1224 
1225 /* seek in the stream */
1226 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1227 {
1228  if (!is->seek_req) {
1229  is->seek_pos = pos;
1230  is->seek_rel = rel;
1231  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1232  if (seek_by_bytes)
1233  is->seek_flags |= AVSEEK_FLAG_BYTE;
1234  is->seek_req = 1;
1235  SDL_CondSignal(is->continue_read_thread);
1236  }
1237 }
1238 
1239 /* pause or resume the video */
1240 static void stream_toggle_pause(VideoState *is)
1241 {
1242  if (is->paused) {
1243  is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1244  if (is->read_pause_return != AVERROR(ENOSYS)) {
1245  is->vidclk.paused = 0;
1246  }
1247  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1248  }
1249  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1250  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1251 }
1252 
1253 static void toggle_pause(VideoState *is)
1254 {
1255  stream_toggle_pause(is);
1256  is->step = 0;
1257 }
1258 
1259 static void step_to_next_frame(VideoState *is)
1260 {
1261  /* if the stream is paused unpause it, then step */
1262  if (is->paused)
1263  stream_toggle_pause(is);
1264  is->step = 1;
1265 }
1266 
1267 static double compute_target_delay(double delay, VideoState *is)
1268 {
1269  double sync_threshold, diff;
1270 
1271  /* update delay to follow master synchronisation source */
1272  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1273  /* if video is slave, we try to correct big delays by
1274  duplicating or deleting a frame */
1275  diff = get_clock(&is->vidclk) - get_master_clock(is);
1276 
1277  /* skip or repeat frame. We take into account the
1278  delay to compute the threshold. I still don't know
1279  if it is the best guess */
1280  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1281  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1282  if (diff <= -sync_threshold)
1283  delay = FFMAX(0, delay + diff);
1284  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1285  delay = delay + diff;
1286  else if (diff >= sync_threshold)
1287  delay = 2 * delay;
1288  }
1289  }
1290 
1291  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1292  delay, -diff);
1293 
1294  return delay;
1295 }
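/* Worked example (editor's note, not in the original source): with a nominal
 * frame delay of 0.040 s the sync threshold clamps to 0.040 s; if the video
 * clock is 0.100 s ahead of the master (diff = +0.100) the delay is doubled
 * to 0.080 s, and if it is 0.100 s behind (diff = -0.100) the delay becomes
 * FFMAX(0, 0.040 - 0.100) = 0, i.e. the next frame is shown immediately. */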
1296 
1297 static double vp_duration(VideoState *is, VideoPicture *vp, VideoPicture *nextvp) {
1298  if (vp->serial == nextvp->serial) {
1299  double duration = nextvp->pts - vp->pts;
1300  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1301  return vp->duration;
1302  else
1303  return duration;
1304  } else {
1305  return 0.0;
1306  }
1307 }
1308 
1309 static void pictq_next_picture(VideoState *is) {
1310  /* update queue size and signal for next picture */
1311  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1312  is->pictq_rindex = 0;
1313 
1314  SDL_LockMutex(is->pictq_mutex);
1315  is->pictq_size--;
1316  SDL_CondSignal(is->pictq_cond);
1317  SDL_UnlockMutex(is->pictq_mutex);
1318 }
1319 
1320 static int pictq_prev_picture(VideoState *is) {
1321  VideoPicture *prevvp;
1322  int ret = 0;
1323  /* update queue size and signal for the previous picture */
1324  prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1325  if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1326  SDL_LockMutex(is->pictq_mutex);
1327  if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1328  if (--is->pictq_rindex == -1)
1329  is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1330  is->pictq_size++;
1331  ret = 1;
1332  }
1333  SDL_CondSignal(is->pictq_cond);
1334  SDL_UnlockMutex(is->pictq_mutex);
1335  }
1336  return ret;
1337 }
1338 
1339 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1340  /* update current video pts */
1341  set_clock(&is->vidclk, pts, serial);
1342  sync_clock_to_slave(&is->extclk, &is->vidclk);
1343  is->video_current_pos = pos;
1344 }
1345 
1346 /* called to display each frame */
1347 static void video_refresh(void *opaque, double *remaining_time)
1348 {
1349  VideoState *is = opaque;
1350  double time;
1351 
1352  SubPicture *sp, *sp2;
1353 
1354  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1355  check_external_clock_speed(is);
1356 
1357  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1358  time = av_gettime() / 1000000.0;
1359  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1360  video_display(is);
1361  is->last_vis_time = time;
1362  }
1363  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1364  }
1365 
1366  if (is->video_st) {
1367  int redisplay = 0;
1368  if (is->force_refresh)
1369  redisplay = pictq_prev_picture(is);
1370 retry:
1371  if (is->pictq_size == 0) {
1372  // nothing to do, no picture to display in the queue
1373  } else {
1374  double last_duration, duration, delay;
1375  VideoPicture *vp, *lastvp;
1376 
1377  /* dequeue the picture */
1378  vp = &is->pictq[is->pictq_rindex];
1379  lastvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1380 
1381  if (vp->serial != is->videoq.serial) {
1382  pictq_next_picture(is);
1383  is->video_current_pos = -1;
1384  redisplay = 0;
1385  goto retry;
1386  }
1387 
1388  if (lastvp->serial != vp->serial && !redisplay)
1389  is->frame_timer = av_gettime() / 1000000.0;
1390 
1391  if (is->paused)
1392  goto display;
1393 
1394  /* compute nominal last_duration */
1395  last_duration = vp_duration(is, lastvp, vp);
1396  if (redisplay)
1397  delay = 0.0;
1398  else
1399  delay = compute_target_delay(last_duration, is);
1400 
1401  time= av_gettime()/1000000.0;
1402  if (time < is->frame_timer + delay && !redisplay) {
1403  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1404  return;
1405  }
1406 
1407  is->frame_timer += delay;
1408  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1409  is->frame_timer = time;
1410 
1411  SDL_LockMutex(is->pictq_mutex);
1412  if (!redisplay && !isnan(vp->pts))
1413  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1414  SDL_UnlockMutex(is->pictq_mutex);
1415 
1416  if (is->pictq_size > 1) {
1417  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1418  duration = vp_duration(is, vp, nextvp);
1419  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1420  if (!redisplay)
1421  is->frame_drops_late++;
1422  pictq_next_picture(is);
1423  redisplay = 0;
1424  goto retry;
1425  }
1426  }
1427 
1428  if (is->subtitle_st) {
1429  while (is->subpq_size > 0) {
1430  sp = &is->subpq[is->subpq_rindex];
1431 
1432  if (is->subpq_size > 1)
1433  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1434  else
1435  sp2 = NULL;
1436 
1437  if (sp->serial != is->subtitleq.serial
1438  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1439  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1440  {
1441  free_subpicture(sp);
1442 
1443  /* update queue size and signal for next picture */
1444  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1445  is->subpq_rindex = 0;
1446 
1447  SDL_LockMutex(is->subpq_mutex);
1448  is->subpq_size--;
1449  SDL_CondSignal(is->subpq_cond);
1450  SDL_UnlockMutex(is->subpq_mutex);
1451  } else {
1452  break;
1453  }
1454  }
1455  }
1456 
1457 display:
1458  /* display picture */
1459  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1460  video_display(is);
1461 
1462  pictq_next_picture(is);
1463 
1464  if (is->step && !is->paused)
1465  stream_toggle_pause(is);
1466  }
1467  }
1468  is->force_refresh = 0;
1469  if (show_status) {
1470  static int64_t last_time;
1471  int64_t cur_time;
1472  int aqsize, vqsize, sqsize;
1473  double av_diff;
1474 
1475  cur_time = av_gettime();
1476  if (!last_time || (cur_time - last_time) >= 30000) {
1477  aqsize = 0;
1478  vqsize = 0;
1479  sqsize = 0;
1480  if (is->audio_st)
1481  aqsize = is->audioq.size;
1482  if (is->video_st)
1483  vqsize = is->videoq.size;
1484  if (is->subtitle_st)
1485  sqsize = is->subtitleq.size;
1486  av_diff = 0;
1487  if (is->audio_st && is->video_st)
1488  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1489  else if (is->video_st)
1490  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1491  else if (is->audio_st)
1492  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1493  av_log(NULL, AV_LOG_INFO,
1494  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1495  get_master_clock(is),
1496  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1497  av_diff,
1498  is->frame_drops_early + is->frame_drops_late,
1499  aqsize / 1024,
1500  vqsize / 1024,
1501  sqsize,
1502  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1503  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1504  fflush(stdout);
1505  last_time = cur_time;
1506  }
1507  }
1508 }
1509 
1510 /* allocate a picture (needs to do that in main thread to avoid
1511  potential locking problems) */
1512 static void alloc_picture(VideoState *is)
1513 {
1514  VideoPicture *vp;
1515  int64_t bufferdiff;
1516 
1517  vp = &is->pictq[is->pictq_windex];
1518 
1519  free_picture(vp);
1520 
1521  video_open(is, 0, vp);
1522 
1523  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1524  SDL_YV12_OVERLAY,
1525  screen);
1526  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1527  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1528  /* SDL allocates a buffer smaller than requested if the video
1529  * overlay hardware is unable to support the requested size. */
1530  av_log(NULL, AV_LOG_FATAL,
1531  "Error: the video system does not support an image\n"
1532  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1533  "to reduce the image size.\n", vp->width, vp->height );
1534  do_exit(is);
1535  }
1536 
1537  SDL_LockMutex(is->pictq_mutex);
1538  vp->allocated = 1;
1539  SDL_CondSignal(is->pictq_cond);
1540  SDL_UnlockMutex(is->pictq_mutex);
1541 }
1542 
1543 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1544  int i, width, height;
1545  Uint8 *p, *maxp;
1546  for (i = 0; i < 3; i++) {
1547  width = bmp->w;
1548  height = bmp->h;
1549  if (i > 0) {
1550  width >>= 1;
1551  height >>= 1;
1552  }
1553  if (bmp->pitches[i] > width) {
1554  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1555  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1556  *(p+1) = *p;
1557  }
1558  }
1559 }
1560 
1561 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1562 {
1563  VideoPicture *vp;
1564 
1565 #if defined(DEBUG_SYNC) && 0
1566  printf("frame_type=%c pts=%0.3f\n",
1567  av_get_picture_type_char(src_frame->pict_type), pts);
1568 #endif
1569 
1570  /* wait until we have space to put a new picture */
1571  SDL_LockMutex(is->pictq_mutex);
1572 
1573  /* keep the last already displayed picture in the queue */
1574  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1575  !is->videoq.abort_request) {
1576  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1577  }
1578  SDL_UnlockMutex(is->pictq_mutex);
1579 
1580  if (is->videoq.abort_request)
1581  return -1;
1582 
1583  vp = &is->pictq[is->pictq_windex];
1584 
1585  vp->sar = src_frame->sample_aspect_ratio;
1586 
1587  /* alloc or resize hardware picture buffer */
1588  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1589  vp->width != src_frame->width ||
1590  vp->height != src_frame->height) {
1591  SDL_Event event;
1592 
1593  vp->allocated = 0;
1594  vp->reallocate = 0;
1595  vp->width = src_frame->width;
1596  vp->height = src_frame->height;
1597 
1598  /* the allocation must be done in the main thread to avoid
1599  locking problems. */
1600  event.type = FF_ALLOC_EVENT;
1601  event.user.data1 = is;
1602  SDL_PushEvent(&event);
1603 
1604  /* wait until the picture is allocated */
1605  SDL_LockMutex(is->pictq_mutex);
1606  while (!vp->allocated && !is->videoq.abort_request) {
1607  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1608  }
1609  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1610  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1611  while (!vp->allocated && !is->abort_request) {
1612  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1613  }
1614  }
1615  SDL_UnlockMutex(is->pictq_mutex);
1616 
1617  if (is->videoq.abort_request)
1618  return -1;
1619  }
1620 
1621  /* if the frame is not skipped, then display it */
1622  if (vp->bmp) {
1623  AVPicture pict = { { 0 } };
1624 
1625  /* get a pointer on the bitmap */
1626  SDL_LockYUVOverlay (vp->bmp);
1627 
1628  pict.data[0] = vp->bmp->pixels[0];
1629  pict.data[1] = vp->bmp->pixels[2];
1630  pict.data[2] = vp->bmp->pixels[1];
1631 
1632  pict.linesize[0] = vp->bmp->pitches[0];
1633  pict.linesize[1] = vp->bmp->pitches[2];
1634  pict.linesize[2] = vp->bmp->pitches[1];
1635 
1636 #if CONFIG_AVFILTER
1637  // FIXME use direct rendering
1638  av_picture_copy(&pict, (AVPicture *)src_frame,
1639  src_frame->format, vp->width, vp->height);
1640 #else
1641  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1642  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1643  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1644  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1645  if (is->img_convert_ctx == NULL) {
1646  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1647  exit(1);
1648  }
1649  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1650  0, vp->height, pict.data, pict.linesize);
1651 #endif
1652  /* workaround SDL PITCH_WORKAROUND */
1653  duplicate_right_border_pixels(vp->bmp);
1654  /* update the bitmap content */
1655  SDL_UnlockYUVOverlay(vp->bmp);
1656 
1657  vp->pts = pts;
1658  vp->duration = duration;
1659  vp->pos = pos;
1660  vp->serial = serial;
1661 
1662  /* now we can update the picture count */
1663  if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1664  is->pictq_windex = 0;
1665  SDL_LockMutex(is->pictq_mutex);
1666  is->pictq_size++;
1667  SDL_UnlockMutex(is->pictq_mutex);
1668  }
1669  return 0;
1670 }
1671 
1672 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1673 {
1674  int got_picture;
1675 
1676  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1677  return -1;
1678 
1679  if (pkt->data == flush_pkt.data) {
1680  avcodec_flush_buffers(is->video_st->codec);
1681  return 0;
1682  }
1683 
1684  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1685  return 0;
1686 
1687  if (!got_picture && !pkt->data)
1688  is->video_finished = *serial;
1689 
1690  if (got_picture) {
1691  int ret = 1;
1692  double dpts = NAN;
1693 
1694  if (decoder_reorder_pts == -1) {
1695  frame->pts = av_frame_get_best_effort_timestamp(frame);
1696  } else if (decoder_reorder_pts) {
1697  frame->pts = frame->pkt_pts;
1698  } else {
1699  frame->pts = frame->pkt_dts;
1700  }
1701 
1702  if (frame->pts != AV_NOPTS_VALUE)
1703  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1704 
1705  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1706 
1707  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1708  if (frame->pts != AV_NOPTS_VALUE) {
1709  double diff = dpts - get_master_clock(is);
1710  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1711  diff - is->frame_last_filter_delay < 0 &&
1712  *serial == is->vidclk.serial &&
1713  is->videoq.nb_packets) {
1714  is->frame_drops_early++;
1715  av_frame_unref(frame);
1716  ret = 0;
1717  }
1718  }
1719  }
1720 
1721  return ret;
1722  }
1723  return 0;
1724 }
1725 
1726 #if CONFIG_AVFILTER
1727 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1728  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1729 {
1730  int ret, i;
1731  int nb_filters = graph->nb_filters;
1732  AVFilterInOut *outputs = NULL, *inputs = NULL;
1733 
1734  if (filtergraph) {
1735  outputs = avfilter_inout_alloc();
1736  inputs = avfilter_inout_alloc();
1737  if (!outputs || !inputs) {
1738  ret = AVERROR(ENOMEM);
1739  goto fail;
1740  }
1741 
1742  outputs->name = av_strdup("in");
1743  outputs->filter_ctx = source_ctx;
1744  outputs->pad_idx = 0;
1745  outputs->next = NULL;
1746 
1747  inputs->name = av_strdup("out");
1748  inputs->filter_ctx = sink_ctx;
1749  inputs->pad_idx = 0;
1750  inputs->next = NULL;
1751 
1752  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1753  goto fail;
1754  } else {
1755  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1756  goto fail;
1757  }
1758 
1759  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1760  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1761  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1762 
1763  ret = avfilter_graph_config(graph, NULL);
1764 fail:
1765  avfilter_inout_free(&outputs);
1766  avfilter_inout_free(&inputs);
1767  return ret;
1768 }
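/* Editor's note (not in the original source): the "in"/"out" labels above
 * follow the usual libavfilter convention, so a user filtergraph string such
 * as "scale=640:360,hflip" is parsed and spliced between source_ctx and
 * sink_ctx, while a NULL string simply links the two contexts directly. */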
1769 
1770 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1771 {
1772  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1773  char sws_flags_str[128];
1774  char buffersrc_args[256];
1775  int ret;
1776  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1777  AVCodecContext *codec = is->video_st->codec;
1778  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1779 
1780  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1781  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1782  graph->scale_sws_opts = av_strdup(sws_flags_str);
1783 
1784  snprintf(buffersrc_args, sizeof(buffersrc_args),
1785  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1786  frame->width, frame->height, frame->format,
1787  is->video_st->time_base.num, is->video_st->time_base.den,
1788  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1789  if (fr.num && fr.den)
1790  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1791 
1792  if ((ret = avfilter_graph_create_filter(&filt_src,
1793  avfilter_get_by_name("buffer"),
1794  "ffplay_buffer", buffersrc_args, NULL,
1795  graph)) < 0)
1796  goto fail;
1797 
1798  ret = avfilter_graph_create_filter(&filt_out,
1799  avfilter_get_by_name("buffersink"),
1800  "ffplay_buffersink", NULL, NULL, graph);
1801  if (ret < 0)
1802  goto fail;
1803 
1804  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1805  goto fail;
1806 
1807  /* SDL YUV code is not handling odd width/height for some driver
1808  * combinations, therefore we crop the picture to an even width/height. */
1809  if ((ret = avfilter_graph_create_filter(&filt_crop,
1810  avfilter_get_by_name("crop"),
1811  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1812  goto fail;
1813  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1814  goto fail;
1815 
1816  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1817  goto fail;
1818 
1819  is->in_video_filter = filt_src;
1820  is->out_video_filter = filt_out;
1821 
1822 fail:
1823  return ret;
1824 }
1825 
1826 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1827 {
1829  int sample_rates[2] = { 0, -1 };
1830  int64_t channel_layouts[2] = { 0, -1 };
1831  int channels[2] = { 0, -1 };
1832  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1833  char aresample_swr_opts[512] = "";
1834  AVDictionaryEntry *e = NULL;
1835  char asrc_args[256];
1836  int ret;
1837 
1838  avfilter_graph_free(&is->agraph);
1839  if (!(is->agraph = avfilter_graph_alloc()))
1840  return AVERROR(ENOMEM);
1841 
1842  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1843  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1844  if (strlen(aresample_swr_opts))
1845  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1846  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1847 
1848  ret = snprintf(asrc_args, sizeof(asrc_args),
1849  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1850  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1851  is->audio_filter_src.channels,
1852  1, is->audio_filter_src.freq);
1853  if (is->audio_filter_src.channel_layout)
1854  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1855  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1856 
1857  ret = avfilter_graph_create_filter(&filt_asrc,
1858  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1859  asrc_args, NULL, is->agraph);
1860  if (ret < 0)
1861  goto end;
1862 
1863 
1864  ret = avfilter_graph_create_filter(&filt_asink,
1865  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1866  NULL, NULL, is->agraph);
1867  if (ret < 0)
1868  goto end;
1869 
1870  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1871  goto end;
1872  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1873  goto end;
1874 
1875  if (force_output_format) {
1876  channel_layouts[0] = is->audio_tgt.channel_layout;
1877  channels [0] = is->audio_tgt.channels;
1878  sample_rates [0] = is->audio_tgt.freq;
1879  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1880  goto end;
1881  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1882  goto end;
1883  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1884  goto end;
1885  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1886  goto end;
1887  }
1888 
1889 
1890  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1891  goto end;
1892 
1893  is->in_audio_filter = filt_asrc;
1894  is->out_audio_filter = filt_asink;
1895 
1896 end:
1897  if (ret < 0)
1898  avfilter_graph_free(&is->agraph);
1899  return ret;
1900 }
1901 #endif /* CONFIG_AVFILTER */
1902 
1903 static int video_thread(void *arg)
1904 {
1905  AVPacket pkt = { 0 };
1906  VideoState *is = arg;
1907  AVFrame *frame = av_frame_alloc();
1908  double pts;
1909  double duration;
1910  int ret;
1911  int serial = 0;
1912  AVRational tb = is->video_st->time_base;
1913  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
1914 
1915 #if CONFIG_AVFILTER
1916  AVFilterGraph *graph = avfilter_graph_alloc();
1917  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1918  int last_w = 0;
1919  int last_h = 0;
1920  enum AVPixelFormat last_format = -2;
1921  int last_serial = -1;
1922 #endif
1923 
1924  for (;;) {
1925  while (is->paused && !is->videoq.abort_request)
1926  SDL_Delay(10);
1927 
1928  av_free_packet(&pkt);
1929 
1930  ret = get_video_frame(is, frame, &pkt, &serial);
1931  if (ret < 0)
1932  goto the_end;
1933  if (!ret)
1934  continue;
1935 
1936 #if CONFIG_AVFILTER
1937  if ( last_w != frame->width
1938  || last_h != frame->height
1939  || last_format != frame->format
1940  || last_serial != serial) {
1941  av_log(NULL, AV_LOG_DEBUG,
1942  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1943  last_w, last_h,
1944  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1945  frame->width, frame->height,
1946  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1947  avfilter_graph_free(&graph);
1948  graph = avfilter_graph_alloc();
1949  if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1950  SDL_Event event;
1951  event.type = FF_QUIT_EVENT;
1952  event.user.data1 = is;
1953  SDL_PushEvent(&event);
1954  goto the_end;
1955  }
1956  filt_in = is->in_video_filter;
1957  filt_out = is->out_video_filter;
1958  last_w = frame->width;
1959  last_h = frame->height;
1960  last_format = frame->format;
1961  last_serial = serial;
1962  frame_rate = filt_out->inputs[0]->frame_rate;
1963  }
1964 
1965  ret = av_buffersrc_add_frame(filt_in, frame);
1966  if (ret < 0)
1967  goto the_end;
1968 
1969  while (ret >= 0) {
1970  is->frame_last_returned_time = av_gettime() / 1000000.0;
1971 
1972  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1973  if (ret < 0) {
1974  if (ret == AVERROR_EOF)
1975  is->video_finished = serial;
1976  ret = 0;
1977  break;
1978  }
1979 
1980  is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1981  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1982  is->frame_last_filter_delay = 0;
1983  tb = filt_out->inputs[0]->time_base;
1984 #endif
1985  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
1986  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
1987  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), serial);
1988  av_frame_unref(frame);
1989 #if CONFIG_AVFILTER
1990  }
1991 #endif
1992 
1993  if (ret < 0)
1994  goto the_end;
1995  }
1996  the_end:
1997 #if CONFIG_AVFILTER
1998  avfilter_graph_free(&graph);
1999 #endif
2000  av_free_packet(&pkt);
2001  av_frame_free(&frame);
2002  return 0;
2003 }
2004 
2005 static int subtitle_thread(void *arg)
2006 {
2007  VideoState *is = arg;
2008  SubPicture *sp;
2009  AVPacket pkt1, *pkt = &pkt1;
2010  int got_subtitle;
2011  int serial;
2012  double pts;
2013  int i, j;
2014  int r, g, b, y, u, v, a;
2015 
2016  for (;;) {
2017  while (is->paused && !is->subtitleq.abort_request) {
2018  SDL_Delay(10);
2019  }
2020  if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
2021  break;
2022 
2023  if (pkt->data == flush_pkt.data) {
2024  avcodec_flush_buffers(is->subtitle_st->codec);
2025  continue;
2026  }
2027  SDL_LockMutex(is->subpq_mutex);
2028  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2029  !is->subtitleq.abort_request) {
2030  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2031  }
2032  SDL_UnlockMutex(is->subpq_mutex);
2033 
2034  if (is->subtitleq.abort_request)
2035  return 0;
2036 
2037  sp = &is->subpq[is->subpq_windex];
2038 
2039  /* NOTE: ipts is the PTS of the _first_ picture beginning in
2040  this packet, if any */
2041  pts = 0;
2042  if (pkt->pts != AV_NOPTS_VALUE)
2043  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2044 
2045  avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2046  &got_subtitle, pkt);
2047  if (got_subtitle && sp->sub.format == 0) {
2048  if (sp->sub.pts != AV_NOPTS_VALUE)
2049  pts = sp->sub.pts / (double)AV_TIME_BASE;
2050  sp->pts = pts;
2051  sp->serial = serial;
2052 
2053  for (i = 0; i < sp->sub.num_rects; i++)
2054  {
2055  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2056  {
2057  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2058  y = RGB_TO_Y_CCIR(r, g, b);
2059  u = RGB_TO_U_CCIR(r, g, b, 0);
2060  v = RGB_TO_V_CCIR(r, g, b, 0);
2061  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2062  }
2063  }
2064 
2065  /* now we can update the picture count */
2066  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2067  is->subpq_windex = 0;
2068  SDL_LockMutex(is->subpq_mutex);
2069  is->subpq_size++;
2070  SDL_UnlockMutex(is->subpq_mutex);
2071  } else if (got_subtitle) {
2072  avsubtitle_free(&sp->sub);
2073  }
2074  av_free_packet(pkt);
2075  }
2076  return 0;
2077 }
2078 
2079 /* copy samples for viewing in editor window */
2080 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2081 {
2082  int size, len;
2083 
2084  size = samples_size / sizeof(short);
2085  while (size > 0) {
2086  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2087  if (len > size)
2088  len = size;
2089  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2090  samples += len;
2091  is->sample_array_index += len;
2092  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2093  is->sample_array_index = 0;
2094  size -= len;
2095  }
2096 }
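/* A note on the code above: sample_array acts as a circular buffer for the
   waveform/RDFT display -- the write index wraps back to 0 once it reaches
   SAMPLE_ARRAY_SIZE, so the visualization always covers the most recently
   played samples. */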
2097 
2098 /* return the wanted number of samples to get better sync if sync_type is video
2099  * or external master clock */
2100 static int synchronize_audio(VideoState *is, int nb_samples)
2101 {
2102  int wanted_nb_samples = nb_samples;
2103 
2104  /* if not master, then we try to remove or add samples to correct the clock */
2105  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2106  double diff, avg_diff;
2107  int min_nb_samples, max_nb_samples;
2108 
2109  diff = get_clock(&is->audclk) - get_master_clock(is);
2110 
2111  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2112  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2113  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2114  /* not enough measures to have a correct estimate */
2115  is->audio_diff_avg_count++;
2116  } else {
2117  /* estimate the A-V difference */
2118  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2119 
2120  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2121  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2122  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2123  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2124  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2125  }
2126  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2127  diff, avg_diff, wanted_nb_samples - nb_samples,
2128  is->audio_clock, is->audio_diff_threshold);
2129  }
2130  } else {
2131  /* too big difference : may be initial PTS errors, so
2132  reset A-V filter */
2133  is->audio_diff_avg_count = 0;
2134  is->audio_diff_cum = 0;
2135  }
2136  }
2137 
2138  return wanted_nb_samples;
2139 }
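/* Rough sketch of the sync math above (the concrete numbers are only
   illustrative): the A-V difference is accumulated as
       audio_diff_cum = diff + coef * audio_diff_cum,  avg = cum * (1 - coef),
   with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794, so the most recent
   AUDIO_DIFF_AVG_NB (20) measurements carry about 99% of the weight.  Once
   the average exceeds audio_diff_threshold, the request
       wanted = nb_samples + diff * freq
   is clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX (10%).  For example, with
   nb_samples = 1024, freq = 48000 and a steady 20 ms drift, the raw request
   of 1024 + 960 samples is clamped to 1126. */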
2140 
2141 /**
2142  * Decode one audio frame and return its uncompressed size.
2143  *
2144  * The processed audio frame is decoded, converted if required, and
2145  * stored in is->audio_buf, with size in bytes given by the return
2146  * value.
2147  */
2148 static int audio_decode_frame(VideoState *is)
2149 {
2150  AVPacket *pkt_temp = &is->audio_pkt_temp;
2151  AVPacket *pkt = &is->audio_pkt;
2152  AVCodecContext *dec = is->audio_st->codec;
2153  int len1, data_size, resampled_data_size;
2154  int64_t dec_channel_layout;
2155  int got_frame;
2156  av_unused double audio_clock0;
2157  int wanted_nb_samples;
2158  AVRational tb;
2159  int ret;
2160  int reconfigure;
2161 
2162  for (;;) {
2163  /* NOTE: the audio packet can contain several frames */
2164  while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2165  if (!is->frame) {
2166  if (!(is->frame = av_frame_alloc()))
2167  return AVERROR(ENOMEM);
2168  } else {
2169  av_frame_unref(is->frame);
2170  }
2171 
2172  if (is->audioq.serial != is->audio_pkt_temp_serial)
2173  break;
2174 
2175  if (is->paused)
2176  return -1;
2177 
2178  if (!is->audio_buf_frames_pending) {
2179  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2180  if (len1 < 0) {
2181  /* if error, we skip the frame */
2182  pkt_temp->size = 0;
2183  break;
2184  }
2185 
2186  pkt_temp->dts =
2187  pkt_temp->pts = AV_NOPTS_VALUE;
2188  pkt_temp->data += len1;
2189  pkt_temp->size -= len1;
2190  if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2191  pkt_temp->stream_index = -1;
2192  if (!pkt_temp->data && !got_frame)
2193  is->audio_finished = is->audio_pkt_temp_serial;
2194 
2195  if (!got_frame)
2196  continue;
2197 
2198  tb = (AVRational){1, is->frame->sample_rate};
2199  if (is->frame->pts != AV_NOPTS_VALUE)
2200  is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2201  else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2202  is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2203  else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2204 #if CONFIG_AVFILTER
2205  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2206 #else
2207  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2208 #endif
2209 
2210  if (is->frame->pts != AV_NOPTS_VALUE)
2211  is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2212 
2213 #if CONFIG_AVFILTER
2214  dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2215 
2216  reconfigure =
2217  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2218  is->frame->format, av_frame_get_channels(is->frame)) ||
2219  is->audio_filter_src.channel_layout != dec_channel_layout ||
2220  is->audio_filter_src.freq != is->frame->sample_rate ||
2221  is->audio_pkt_temp_serial != is->audio_last_serial;
2222 
2223  if (reconfigure) {
2224  char buf1[1024], buf2[1024];
2225  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2226  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2227  av_log(NULL, AV_LOG_DEBUG,
2228  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2229  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2230  is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2231 
2232  is->audio_filter_src.fmt = is->frame->format;
2233  is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2234  is->audio_filter_src.channel_layout = dec_channel_layout;
2235  is->audio_filter_src.freq = is->frame->sample_rate;
2236  is->audio_last_serial = is->audio_pkt_temp_serial;
2237 
2238  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2239  return ret;
2240  }
2241 
2242  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2243  return ret;
2244 #endif
2245  }
2246 #if CONFIG_AVFILTER
2247  if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2248  if (ret == AVERROR(EAGAIN)) {
2249  is->audio_buf_frames_pending = 0;
2250  continue;
2251  }
2252  if (ret == AVERROR_EOF)
2253  is->audio_finished = is->audio_pkt_temp_serial;
2254  return ret;
2255  }
2256  is->audio_buf_frames_pending = 1;
2257  tb = is->out_audio_filter->inputs[0]->time_base;
2258 #endif
2259 
2260  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2261  is->frame->nb_samples,
2262  is->frame->format, 1);
2263 
2264  dec_channel_layout =
2265  (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2266  is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2267  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2268 
2269  if (is->frame->format != is->audio_src.fmt ||
2270  dec_channel_layout != is->audio_src.channel_layout ||
2271  is->frame->sample_rate != is->audio_src.freq ||
2272  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2273  swr_free(&is->swr_ctx);
2274  is->swr_ctx = swr_alloc_set_opts(NULL,
2275  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2276  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2277  0, NULL);
2278  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2279  av_log(NULL, AV_LOG_ERROR,
2280  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2281  is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2282  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2283  break;
2284  }
2285  is->audio_src.channel_layout = dec_channel_layout;
2286  is->audio_src.channels = av_frame_get_channels(is->frame);
2287  is->audio_src.freq = is->frame->sample_rate;
2288  is->audio_src.fmt = is->frame->format;
2289  }
2290 
2291  if (is->swr_ctx) {
2292  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2293  uint8_t **out = &is->audio_buf1;
2294  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
2295  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2296  int len2;
2297  if (out_size < 0) {
2298  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2299  break;
2300  }
2301  if (wanted_nb_samples != is->frame->nb_samples) {
2302  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2303  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2304  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2305  break;
2306  }
2307  }
2308  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2309  if (!is->audio_buf1)
2310  return AVERROR(ENOMEM);
2311  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2312  if (len2 < 0) {
2313  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2314  break;
2315  }
2316  if (len2 == out_count) {
2317  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2318  swr_init(is->swr_ctx);
2319  }
2320  is->audio_buf = is->audio_buf1;
2321  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2322  } else {
2323  is->audio_buf = is->frame->data[0];
2324  resampled_data_size = data_size;
2325  }
2326 
2327  audio_clock0 = is->audio_clock;
2328  /* update the audio clock with the pts */
2329  if (is->frame->pts != AV_NOPTS_VALUE)
2330  is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
2331  else
2332  is->audio_clock = NAN;
2333  is->audio_clock_serial = is->audio_pkt_temp_serial;
2334 #ifdef DEBUG
2335  {
2336  static double last_clock;
2337  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2338  is->audio_clock - last_clock,
2339  is->audio_clock, audio_clock0);
2340  last_clock = is->audio_clock;
2341  }
2342 #endif
2343  return resampled_data_size;
2344  }
2345 
2346  /* free the current packet */
2347  if (pkt->data)
2348  av_free_packet(pkt);
2349  memset(pkt_temp, 0, sizeof(*pkt_temp));
2350  pkt_temp->stream_index = -1;
2351 
2352  if (is->audioq.abort_request) {
2353  return -1;
2354  }
2355 
2356  if (is->audioq.nb_packets == 0)
2357  SDL_CondSignal(is->continue_read_thread);
2358 
2359  /* read next packet */
2360  if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2361  return -1;
2362 
2363  if (pkt->data == flush_pkt.data) {
2364  avcodec_flush_buffers(dec);
2365  is->audio_buf_frames_pending = 0;
2366  is->audio_frame_next_pts = AV_NOPTS_VALUE;
2367  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2368  is->audio_frame_next_pts = is->audio_st->start_time;
2369  }
2370 
2371  *pkt_temp = *pkt;
2372  }
2373 }
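/* Sketch of the conversion step above: swr_set_compensation() is asked to add
   or drop (wanted_nb_samples - nb_samples) output samples, spread over
   wanted_nb_samples * audio_tgt.freq / sample_rate samples, so small clock
   corrections become a barely audible resampling-rate tweak.  The destination
   buffer keeps 256 samples of headroom; if swr_convert() ever fills it
   completely, the code assumes samples may have been lost and reinitializes
   the converter. */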
2374 
2375 /* prepare a new audio buffer */
2376 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2377 {
2378  VideoState *is = opaque;
2379  int audio_size, len1;
2380 
2381  audio_callback_time = av_gettime();
2382 
2383  while (len > 0) {
2384  if (is->audio_buf_index >= is->audio_buf_size) {
2385  audio_size = audio_decode_frame(is);
2386  if (audio_size < 0) {
2387  /* if error, just output silence */
2388  is->audio_buf = is->silence_buf;
2389  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2390  } else {
2391  if (is->show_mode != SHOW_MODE_VIDEO)
2392  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2393  is->audio_buf_size = audio_size;
2394  }
2395  is->audio_buf_index = 0;
2396  }
2397  len1 = is->audio_buf_size - is->audio_buf_index;
2398  if (len1 > len)
2399  len1 = len;
2400  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2401  len -= len1;
2402  stream += len1;
2403  is->audio_buf_index += len1;
2404  }
2405  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2406  /* Let's assume the audio driver that is used by SDL has two periods. */
2407  if (!isnan(is->audio_clock)) {
2408  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2409  sync_clock_to_slave(&is->extclk, &is->audclk);
2410  }
2411 }
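/* The clock update above estimates how much already-decoded audio is still
   queued: roughly 2 * audio_hw_buf_size (the assumed two SDL periods) plus
   the unread part of audio_buf, divided by bytes_per_sec.  As an illustrative
   figure, 48 kHz stereo s16 gives 192000 bytes/s, so a 4096-byte SDL buffer
   alone accounts for about 2 * 4096 / 192000 ~= 43 ms of latency. */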
2412 
2413 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2414 {
2415  SDL_AudioSpec wanted_spec, spec;
2416  const char *env;
2417  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2418 
2419  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2420  if (env) {
2421  wanted_nb_channels = atoi(env);
2422  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2423  }
2424  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2425  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2426  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2427  }
2428  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2429  wanted_spec.freq = wanted_sample_rate;
2430  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2431  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2432  return -1;
2433  }
2434  wanted_spec.format = AUDIO_S16SYS;
2435  wanted_spec.silence = 0;
2436  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2437  wanted_spec.callback = sdl_audio_callback;
2438  wanted_spec.userdata = opaque;
2439  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2440  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2441  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2442  if (!wanted_spec.channels) {
2443  av_log(NULL, AV_LOG_ERROR,
2444  "No more channel combinations to try, audio open failed\n");
2445  return -1;
2446  }
2447  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2448  }
2449  if (spec.format != AUDIO_S16SYS) {
2450  av_log(NULL, AV_LOG_ERROR,
2451  "SDL advised audio format %d is not supported!\n", spec.format);
2452  return -1;
2453  }
2454  if (spec.channels != wanted_spec.channels) {
2455  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2456  if (!wanted_channel_layout) {
2457  av_log(NULL, AV_LOG_ERROR,
2458  "SDL advised channel count %d is not supported!\n", spec.channels);
2459  return -1;
2460  }
2461  }
2462 
2463  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2464  audio_hw_params->freq = spec.freq;
2465  audio_hw_params->channel_layout = wanted_channel_layout;
2466  audio_hw_params->channels = spec.channels;
2467  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2468  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2469  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2470  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2471  return -1;
2472  }
2473  return spec.size;
2474 }
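/* How the fallback above behaves: when SDL_OpenAudio() rejects a channel
   count, next_nb_channels[] maps 7 or 8 channels to 6, 6 to 4, 5 to 6,
   4 to 2, 3 to 6 and 2 to 1; a failed mono request maps to 0 and aborts,
   so every retry chain terminates after a few steps. */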
2475 
2476 /* open a given stream. Return 0 if OK */
2477 static int stream_component_open(VideoState *is, int stream_index)
2478 {
2479  AVFormatContext *ic = is->ic;
2480  AVCodecContext *avctx;
2481  AVCodec *codec;
2482  const char *forced_codec_name = NULL;
2483  AVDictionary *opts;
2484  AVDictionaryEntry *t = NULL;
2485  int sample_rate, nb_channels;
2486  int64_t channel_layout;
2487  int ret;
2488  int stream_lowres = lowres;
2489 
2490  if (stream_index < 0 || stream_index >= ic->nb_streams)
2491  return -1;
2492  avctx = ic->streams[stream_index]->codec;
2493 
2494  codec = avcodec_find_decoder(avctx->codec_id);
2495 
2496  switch(avctx->codec_type){
2497  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2498  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2499  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2500  }
2501  if (forced_codec_name)
2502  codec = avcodec_find_decoder_by_name(forced_codec_name);
2503  if (!codec) {
2504  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2505  "No codec could be found with name '%s'\n", forced_codec_name);
2506  else av_log(NULL, AV_LOG_WARNING,
2507  "No codec could be found with id %d\n", avctx->codec_id);
2508  return -1;
2509  }
2510 
2511  avctx->codec_id = codec->id;
2512  avctx->workaround_bugs = workaround_bugs;
2513  if(stream_lowres > av_codec_get_max_lowres(codec)){
2514  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2515  av_codec_get_max_lowres(codec));
2516  stream_lowres = av_codec_get_max_lowres(codec);
2517  }
2518  av_codec_set_lowres(avctx, stream_lowres);
2519  avctx->error_concealment = error_concealment;
2520 
2521  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2522  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2523  if(codec->capabilities & CODEC_CAP_DR1)
2524  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2525 
2526  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2527  if (!av_dict_get(opts, "threads", NULL, 0))
2528  av_dict_set(&opts, "threads", "auto", 0);
2529  if (stream_lowres)
2530  av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
2531  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2532  av_dict_set(&opts, "refcounted_frames", "1", 0);
2533  if (avcodec_open2(avctx, codec, &opts) < 0)
2534  return -1;
2535  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2536  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2537  return AVERROR_OPTION_NOT_FOUND;
2538  }
2539 
2540  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2541  switch (avctx->codec_type) {
2542  case AVMEDIA_TYPE_AUDIO:
2543 #if CONFIG_AVFILTER
2544  {
2545  AVFilterLink *link;
2546 
2547  is->audio_filter_src.freq = avctx->sample_rate;
2548  is->audio_filter_src.channels = avctx->channels;
2549  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2550  is->audio_filter_src.fmt = avctx->sample_fmt;
2551  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2552  return ret;
2553  link = is->out_audio_filter->inputs[0];
2554  sample_rate = link->sample_rate;
2555  nb_channels = link->channels;
2556  channel_layout = link->channel_layout;
2557  }
2558 #else
2559  sample_rate = avctx->sample_rate;
2560  nb_channels = avctx->channels;
2561  channel_layout = avctx->channel_layout;
2562 #endif
2563 
2564  /* prepare audio output */
2565  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2566  return ret;
2567  is->audio_hw_buf_size = ret;
2568  is->audio_src = is->audio_tgt;
2569  is->audio_buf_size = 0;
2570  is->audio_buf_index = 0;
2571 
2572  /* init averaging filter */
2573  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2574  is->audio_diff_avg_count = 0;
2575  /* since we do not have a precise enough audio FIFO fullness,
2576  we correct audio sync only if larger than this threshold */
2577  is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
2578 
2579  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2580  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2581  is->audio_pkt_temp.stream_index = -1;
2582 
2583  is->audio_stream = stream_index;
2584  is->audio_st = ic->streams[stream_index];
2585 
2586  packet_queue_start(&is->audioq);
2587  SDL_PauseAudio(0);
2588  break;
2589  case AVMEDIA_TYPE_VIDEO:
2590  is->video_stream = stream_index;
2591  is->video_st = ic->streams[stream_index];
2592 
2593  packet_queue_start(&is->videoq);
2594  is->video_tid = SDL_CreateThread(video_thread, is);
2595  is->queue_attachments_req = 1;
2596  break;
2597  case AVMEDIA_TYPE_SUBTITLE:
2598  is->subtitle_stream = stream_index;
2599  is->subtitle_st = ic->streams[stream_index];
2600  packet_queue_start(&is->subtitleq);
2601 
2602  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2603  break;
2604  default:
2605  break;
2606  }
2607  return 0;
2608 }
2609 
2610 static void stream_component_close(VideoState *is, int stream_index)
2611 {
2612  AVFormatContext *ic = is->ic;
2613  AVCodecContext *avctx;
2614 
2615  if (stream_index < 0 || stream_index >= ic->nb_streams)
2616  return;
2617  avctx = ic->streams[stream_index]->codec;
2618 
2619  switch (avctx->codec_type) {
2620  case AVMEDIA_TYPE_AUDIO:
2621  packet_queue_abort(&is->audioq);
2622 
2623  SDL_CloseAudio();
2624 
2625  packet_queue_flush(&is->audioq);
2626  av_free_packet(&is->audio_pkt);
2627  swr_free(&is->swr_ctx);
2628  av_freep(&is->audio_buf1);
2629  is->audio_buf1_size = 0;
2630  is->audio_buf = NULL;
2631  av_frame_free(&is->frame);
2632 
2633  if (is->rdft) {
2634  av_rdft_end(is->rdft);
2635  av_freep(&is->rdft_data);
2636  is->rdft = NULL;
2637  is->rdft_bits = 0;
2638  }
2639 #if CONFIG_AVFILTER
2640  avfilter_graph_free(&is->agraph);
2641 #endif
2642  break;
2643  case AVMEDIA_TYPE_VIDEO:
2644  packet_queue_abort(&is->videoq);
2645 
2646  /* note: we also signal this mutex to make sure we deblock the
2647  video thread in all cases */
2648  SDL_LockMutex(is->pictq_mutex);
2649  SDL_CondSignal(is->pictq_cond);
2650  SDL_UnlockMutex(is->pictq_mutex);
2651 
2652  SDL_WaitThread(is->video_tid, NULL);
2653 
2654  packet_queue_flush(&is->videoq);
2655  break;
2656  case AVMEDIA_TYPE_SUBTITLE:
2657  packet_queue_abort(&is->subtitleq);
2658 
2659  /* note: we also signal this mutex to make sure we deblock the
2660  subtitle thread in all cases */
2661  SDL_LockMutex(is->subpq_mutex);
2662  SDL_CondSignal(is->subpq_cond);
2663  SDL_UnlockMutex(is->subpq_mutex);
2664 
2665  SDL_WaitThread(is->subtitle_tid, NULL);
2666 
2667  packet_queue_flush(&is->subtitleq);
2668  break;
2669  default:
2670  break;
2671  }
2672 
2673  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2674  avcodec_close(avctx);
2675  switch (avctx->codec_type) {
2676  case AVMEDIA_TYPE_AUDIO:
2677  is->audio_st = NULL;
2678  is->audio_stream = -1;
2679  break;
2680  case AVMEDIA_TYPE_VIDEO:
2681  is->video_st = NULL;
2682  is->video_stream = -1;
2683  break;
2684  case AVMEDIA_TYPE_SUBTITLE:
2685  is->subtitle_st = NULL;
2686  is->subtitle_stream = -1;
2687  break;
2688  default:
2689  break;
2690  }
2691 }
2692 
2693 static int decode_interrupt_cb(void *ctx)
2694 {
2695  VideoState *is = ctx;
2696  return is->abort_request;
2697 }
2698 
2699 static int is_realtime(AVFormatContext *s)
2700 {
2701  if( !strcmp(s->iformat->name, "rtp")
2702  || !strcmp(s->iformat->name, "rtsp")
2703  || !strcmp(s->iformat->name, "sdp")
2704  )
2705  return 1;
2706 
2707  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2708  || !strncmp(s->filename, "udp:", 4)
2709  )
2710  )
2711  return 1;
2712  return 0;
2713 }
2714 
2715 /* this thread gets the stream from the disk or the network */
2716 static int read_thread(void *arg)
2717 {
2718  VideoState *is = arg;
2719  AVFormatContext *ic = NULL;
2720  int err, i, ret;
2721  int st_index[AVMEDIA_TYPE_NB];
2722  AVPacket pkt1, *pkt = &pkt1;
2723  int eof = 0;
2724  int64_t stream_start_time;
2725  int pkt_in_play_range = 0;
2726  AVDictionaryEntry *t;
2727  AVDictionary **opts;
2728  int orig_nb_streams;
2729  SDL_mutex *wait_mutex = SDL_CreateMutex();
2730 
2731  memset(st_index, -1, sizeof(st_index));
2732  is->last_video_stream = is->video_stream = -1;
2733  is->last_audio_stream = is->audio_stream = -1;
2734  is->last_subtitle_stream = is->subtitle_stream = -1;
2735 
2736  ic = avformat_alloc_context();
2737  ic->interrupt_callback.callback = decode_interrupt_cb;
2738  ic->interrupt_callback.opaque = is;
2739  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2740  if (err < 0) {
2741  print_error(is->filename, err);
2742  ret = -1;
2743  goto fail;
2744  }
2745  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2746  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2747  ret = AVERROR_OPTION_NOT_FOUND;
2748  goto fail;
2749  }
2750  is->ic = ic;
2751 
2752  if (genpts)
2753  ic->flags |= AVFMT_FLAG_GENPTS;
2754 
2755  opts = setup_find_stream_info_opts(ic, codec_opts);
2756  orig_nb_streams = ic->nb_streams;
2757 
2758  err = avformat_find_stream_info(ic, opts);
2759  if (err < 0) {
2760  av_log(NULL, AV_LOG_WARNING,
2761  "%s: could not find codec parameters\n", is->filename);
2762  ret = -1;
2763  goto fail;
2764  }
2765  for (i = 0; i < orig_nb_streams; i++)
2766  av_dict_free(&opts[i]);
2767  av_freep(&opts);
2768 
2769  if (ic->pb)
2770  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2771 
2772  if (seek_by_bytes < 0)
2773  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2774 
2775  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2776 
2777  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2778  window_title = av_asprintf("%s - %s", t->value, input_filename);
2779 
2780  /* if seeking requested, we execute it */
2781  if (start_time != AV_NOPTS_VALUE) {
2782  int64_t timestamp;
2783 
2784  timestamp = start_time;
2785  /* add the stream start time */
2786  if (ic->start_time != AV_NOPTS_VALUE)
2787  timestamp += ic->start_time;
2788  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2789  if (ret < 0) {
2790  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2791  is->filename, (double)timestamp / AV_TIME_BASE);
2792  }
2793  }
2794 
2795  is->realtime = is_realtime(ic);
2796 
2797  for (i = 0; i < ic->nb_streams; i++)
2798  ic->streams[i]->discard = AVDISCARD_ALL;
2799  if (!video_disable)
2800  st_index[AVMEDIA_TYPE_VIDEO] =
2801  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2802  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2803  if (!audio_disable)
2804  st_index[AVMEDIA_TYPE_AUDIO] =
2805  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2806  wanted_stream[AVMEDIA_TYPE_AUDIO],
2807  st_index[AVMEDIA_TYPE_VIDEO],
2808  NULL, 0);
2809  if (!video_disable && !subtitle_disable)
2810  st_index[AVMEDIA_TYPE_SUBTITLE] =
2811  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2812  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2813  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2814  st_index[AVMEDIA_TYPE_AUDIO] :
2815  st_index[AVMEDIA_TYPE_VIDEO]),
2816  NULL, 0);
2817  if (show_status) {
2818  av_dump_format(ic, 0, is->filename, 0);
2819  }
2820 
2821  is->show_mode = show_mode;
2822  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2823  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2824  AVCodecContext *avctx = st->codec;
2825  VideoPicture vp = {.width = avctx->width, .height = avctx->height, .sar = av_guess_sample_aspect_ratio(ic, st, NULL)};
2826  if (vp.width)
2827  set_default_window_size(&vp);
2828  }
2829 
2830  /* open the streams */
2831  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2832  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2833  }
2834 
2835  ret = -1;
2836  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2837  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2838  }
2839  if (is->show_mode == SHOW_MODE_NONE)
2840  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2841 
2842  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2843  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2844  }
2845 
2846  if (is->video_stream < 0 && is->audio_stream < 0) {
2847  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2848  is->filename);
2849  ret = -1;
2850  goto fail;
2851  }
2852 
2853  if (infinite_buffer < 0 && is->realtime)
2854  infinite_buffer = 1;
2855 
2856  for (;;) {
2857  if (is->abort_request)
2858  break;
2859  if (is->paused != is->last_paused) {
2860  is->last_paused = is->paused;
2861  if (is->paused)
2862  is->read_pause_return = av_read_pause(ic);
2863  else
2864  av_read_play(ic);
2865  }
2866 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2867  if (is->paused &&
2868  (!strcmp(ic->iformat->name, "rtsp") ||
2869  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2870  /* wait 10 ms to avoid trying to get another packet */
2871  /* XXX: horrible */
2872  SDL_Delay(10);
2873  continue;
2874  }
2875 #endif
2876  if (is->seek_req) {
2877  int64_t seek_target = is->seek_pos;
2878  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2879  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2880 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2881 // of the seek_pos/seek_rel variables
2882 
2883  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2884  if (ret < 0) {
2885  av_log(NULL, AV_LOG_ERROR,
2886  "%s: error while seeking\n", is->ic->filename);
2887  } else {
2888  if (is->audio_stream >= 0) {
2889  packet_queue_flush(&is->audioq);
2890  packet_queue_put(&is->audioq, &flush_pkt);
2891  }
2892  if (is->subtitle_stream >= 0) {
2893  packet_queue_flush(&is->subtitleq);
2894  packet_queue_put(&is->subtitleq, &flush_pkt);
2895  }
2896  if (is->video_stream >= 0) {
2897  packet_queue_flush(&is->videoq);
2898  packet_queue_put(&is->videoq, &flush_pkt);
2899  }
2900  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2901  set_clock(&is->extclk, NAN, 0);
2902  } else {
2903  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2904  }
2905  }
2906  is->seek_req = 0;
2907  is->queue_attachments_req = 1;
2908  eof = 0;
2909  if (is->paused)
2910  step_to_next_frame(is);
2911  }
2912  if (is->queue_attachments_req) {
2913  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2914  AVPacket copy;
2915  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2916  goto fail;
2917  packet_queue_put(&is->videoq, &copy);
2918  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2919  }
2920  is->queue_attachments_req = 0;
2921  }
2922 
2923  /* if the queues are full, no need to read more */
2924  if (infinite_buffer<1 &&
2925  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2926  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2927  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2928  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2929  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2930  /* wait 10 ms */
2931  SDL_LockMutex(wait_mutex);
2932  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2933  SDL_UnlockMutex(wait_mutex);
2934  continue;
2935  }
2936  if (!is->paused &&
2937  (!is->audio_st || is->audio_finished == is->audioq.serial) &&
2938  (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
2939  if (loop != 1 && (!loop || --loop)) {
2940  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2941  } else if (autoexit) {
2942  ret = AVERROR_EOF;
2943  goto fail;
2944  }
2945  }
2946  if (eof) {
2947  if (is->video_stream >= 0)
2948  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2949  if (is->audio_stream >= 0)
2950  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
2951  if (is->subtitle_stream >= 0)
2952  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
2953  SDL_Delay(10);
2954  eof=0;
2955  continue;
2956  }
2957  ret = av_read_frame(ic, pkt);
2958  if (ret < 0) {
2959  if (ret == AVERROR_EOF || url_feof(ic->pb))
2960  eof = 1;
2961  if (ic->pb && ic->pb->error)
2962  break;
2963  SDL_LockMutex(wait_mutex);
2964  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2965  SDL_UnlockMutex(wait_mutex);
2966  continue;
2967  }
2968  /* check if packet is in play range specified by user, then queue, otherwise discard */
2969  stream_start_time = ic->streams[pkt->stream_index]->start_time;
2970  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2971  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
2972  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2973  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2974  <= ((double)duration / 1000000);
2975  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2976  packet_queue_put(&is->audioq, pkt);
2977  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2978  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2979  packet_queue_put(&is->videoq, pkt);
2980  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2981  packet_queue_put(&is->subtitleq, pkt);
2982  } else {
2983  av_free_packet(pkt);
2984  }
2985  }
2986  /* wait until the end */
2987  while (!is->abort_request) {
2988  SDL_Delay(100);
2989  }
2990 
2991  ret = 0;
2992  fail:
2993  /* close each stream */
2994  if (is->audio_stream >= 0)
2995  stream_component_close(is, is->audio_stream);
2996  if (is->video_stream >= 0)
2997  stream_component_close(is, is->video_stream);
2998  if (is->subtitle_stream >= 0)
2999  stream_component_close(is, is->subtitle_stream);
3000  if (is->ic) {
3001  avformat_close_input(&is->ic);
3002  }
3003 
3004  if (ret != 0) {
3005  SDL_Event event;
3006 
3007  event.type = FF_QUIT_EVENT;
3008  event.user.data1 = is;
3009  SDL_PushEvent(&event);
3010  }
3011  SDL_DestroyMutex(wait_mutex);
3012  return 0;
3013 }
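/* Summary of the demuxing loop above: packets are read ahead until either the
   three queues together hold MAX_QUEUE_SIZE bytes, or every open stream
   already has more than MIN_FRAMES packets queued, after which the thread
   waits for up to 10 ms on continue_read_thread.  For realtime inputs (see
   is_realtime()) infinite_buffer disables this cap so the source is never
   back-pressured. */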
3014 
3015 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3016 {
3017  VideoState *is;
3018 
3019  is = av_mallocz(sizeof(VideoState));
3020  if (!is)
3021  return NULL;
3022  av_strlcpy(is->filename, filename, sizeof(is->filename));
3023  is->iformat = iformat;
3024  is->ytop = 0;
3025  is->xleft = 0;
3026 
3027  /* start video display */
3028  is->pictq_mutex = SDL_CreateMutex();
3029  is->pictq_cond = SDL_CreateCond();
3030 
3031  is->subpq_mutex = SDL_CreateMutex();
3032  is->subpq_cond = SDL_CreateCond();
3033 
3034  packet_queue_init(&is->videoq);
3035  packet_queue_init(&is->audioq);
3036  packet_queue_init(&is->subtitleq);
3037 
3038  is->continue_read_thread = SDL_CreateCond();
3039 
3040  init_clock(&is->vidclk, &is->videoq.serial);
3041  init_clock(&is->audclk, &is->audioq.serial);
3042  init_clock(&is->extclk, &is->extclk.serial);
3043  is->audio_clock_serial = -1;
3044  is->audio_last_serial = -1;
3045  is->av_sync_type = av_sync_type;
3046  is->read_tid = SDL_CreateThread(read_thread, is);
3047  if (!is->read_tid) {
3048  av_free(is);
3049  return NULL;
3050  }
3051  return is;
3052 }
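/* Note on the clock setup above: each clock is initialized with a pointer to
   the serial of its packet queue, so when a seek advances the queue serial
   (via the flush packet) timestamps taken under the old serial are treated as
   stale; the external clock simply tracks its own serial. */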
3053 
3054 static void stream_cycle_channel(VideoState *is, int codec_type)
3055 {
3056  AVFormatContext *ic = is->ic;
3057  int start_index, stream_index;
3058  int old_index;
3059  AVStream *st;
3060  AVProgram *p = NULL;
3061  int nb_streams = is->ic->nb_streams;
3062 
3063  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3064  start_index = is->last_video_stream;
3065  old_index = is->video_stream;
3066  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3067  start_index = is->last_audio_stream;
3068  old_index = is->audio_stream;
3069  } else {
3070  start_index = is->last_subtitle_stream;
3071  old_index = is->subtitle_stream;
3072  }
3073  stream_index = start_index;
3074 
3075  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3076  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3077  if (p) {
3078  nb_streams = p->nb_stream_indexes;
3079  for (start_index = 0; start_index < nb_streams; start_index++)
3080  if (p->stream_index[start_index] == stream_index)
3081  break;
3082  if (start_index == nb_streams)
3083  start_index = -1;
3084  stream_index = start_index;
3085  }
3086  }
3087 
3088  for (;;) {
3089  if (++stream_index >= nb_streams)
3090  {
3091  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3092  {
3093  stream_index = -1;
3094  is->last_subtitle_stream = -1;
3095  goto the_end;
3096  }
3097  if (start_index == -1)
3098  return;
3099  stream_index = 0;
3100  }
3101  if (stream_index == start_index)
3102  return;
3103  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3104  if (st->codec->codec_type == codec_type) {
3105  /* check that parameters are OK */
3106  switch (codec_type) {
3107  case AVMEDIA_TYPE_AUDIO:
3108  if (st->codec->sample_rate != 0 &&
3109  st->codec->channels != 0)
3110  goto the_end;
3111  break;
3112  case AVMEDIA_TYPE_VIDEO:
3113  case AVMEDIA_TYPE_SUBTITLE:
3114  goto the_end;
3115  default:
3116  break;
3117  }
3118  }
3119  }
3120  the_end:
3121  if (p && stream_index != -1)
3122  stream_index = p->stream_index[stream_index];
3123  stream_component_close(is, old_index);
3124  stream_component_open(is, stream_index);
3125 }
3126 
3127 
3128 static void toggle_full_screen(VideoState *is)
3129 {
3130 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3131  /* OS X needs to reallocate the SDL overlays */
3132  int i;
3133  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3134  is->pictq[i].reallocate = 1;
3135 #endif
3136  is_full_screen = !is_full_screen;
3137  video_open(is, 1, NULL);
3138 }
3139 
3140 static void toggle_audio_display(VideoState *is)
3141 {
3142  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3143  int next = is->show_mode;
3144  do {
3145  next = (next + 1) % SHOW_MODE_NB;
3146  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3147  if (is->show_mode != next) {
3148  fill_rectangle(screen,
3149  is->xleft, is->ytop, is->width, is->height,
3150  bgcolor, 1);
3151  is->force_refresh = 1;
3152  is->show_mode = next;
3153  }
3154 }
3155 
3156 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3157  double remaining_time = 0.0;
3158  SDL_PumpEvents();
3159  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3160  if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3161  SDL_ShowCursor(0);
3162  cursor_hidden = 1;
3163  }
3164  if (remaining_time > 0.0)
3165  av_usleep((int64_t)(remaining_time * 1000000.0));
3166  remaining_time = REFRESH_RATE;
3167  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3168  video_refresh(is, &remaining_time);
3169  SDL_PumpEvents();
3170  }
3171 }
3172 
3173 static void seek_chapter(VideoState *is, int incr)
3174 {
3175  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3176  int i;
3177 
3178  if (!is->ic->nb_chapters)
3179  return;
3180 
3181  /* find the current chapter */
3182  for (i = 0; i < is->ic->nb_chapters; i++) {
3183  AVChapter *ch = is->ic->chapters[i];
3184  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3185  i--;
3186  break;
3187  }
3188  }
3189 
3190  i += incr;
3191  i = FFMAX(i, 0);
3192  if (i >= is->ic->nb_chapters)
3193  return;
3194 
3195  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3196  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3197  AV_TIME_BASE_Q), 0, 0);
3198 }
3199 
3200 /* handle an event sent by the GUI */
3201 static void event_loop(VideoState *cur_stream)
3202 {
3203  SDL_Event event;
3204  double incr, pos, frac;
3205 
3206  for (;;) {
3207  double x;
3208  refresh_loop_wait_event(cur_stream, &event);
3209  switch (event.type) {
3210  case SDL_KEYDOWN:
3211  if (exit_on_keydown) {
3212  do_exit(cur_stream);
3213  break;
3214  }
3215  switch (event.key.keysym.sym) {
3216  case SDLK_ESCAPE:
3217  case SDLK_q:
3218  do_exit(cur_stream);
3219  break;
3220  case SDLK_f:
3221  toggle_full_screen(cur_stream);
3222  cur_stream->force_refresh = 1;
3223  break;
3224  case SDLK_p:
3225  case SDLK_SPACE:
3226  toggle_pause(cur_stream);
3227  break;
3228  case SDLK_s: // S: Step to next frame
3229  step_to_next_frame(cur_stream);
3230  break;
3231  case SDLK_a:
3232  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3233  break;
3234  case SDLK_v:
3235  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3236  break;
3237  case SDLK_c:
3238  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3239  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3240  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3241  break;
3242  case SDLK_t:
3243  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3244  break;
3245  case SDLK_w:
3246  toggle_audio_display(cur_stream);
3247  break;
3248  case SDLK_PAGEUP:
3249  if (cur_stream->ic->nb_chapters <= 1) {
3250  incr = 600.0;
3251  goto do_seek;
3252  }
3253  seek_chapter(cur_stream, 1);
3254  break;
3255  case SDLK_PAGEDOWN:
3256  if (cur_stream->ic->nb_chapters <= 1) {
3257  incr = -600.0;
3258  goto do_seek;
3259  }
3260  seek_chapter(cur_stream, -1);
3261  break;
3262  case SDLK_LEFT:
3263  incr = -10.0;
3264  goto do_seek;
3265  case SDLK_RIGHT:
3266  incr = 10.0;
3267  goto do_seek;
3268  case SDLK_UP:
3269  incr = 60.0;
3270  goto do_seek;
3271  case SDLK_DOWN:
3272  incr = -60.0;
3273  do_seek:
3274  if (seek_by_bytes) {
3275  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3276  pos = cur_stream->video_current_pos;
3277  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3278  pos = cur_stream->audio_pkt.pos;
3279  } else
3280  pos = avio_tell(cur_stream->ic->pb);
3281  if (cur_stream->ic->bit_rate)
3282  incr *= cur_stream->ic->bit_rate / 8.0;
3283  else
3284  incr *= 180000.0;
3285  pos += incr;
3286  stream_seek(cur_stream, pos, incr, 1);
3287  } else {
3288  pos = get_master_clock(cur_stream);
3289  if (isnan(pos))
3290  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3291  pos += incr;
3292  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3293  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3294  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3295  }
3296  break;
3297  default:
3298  break;
3299  }
3300  break;
3301  case SDL_VIDEOEXPOSE:
3302  cur_stream->force_refresh = 1;
3303  break;
3304  case SDL_MOUSEBUTTONDOWN:
3305  if (exit_on_mousedown) {
3306  do_exit(cur_stream);
3307  break;
3308  }
3309  case SDL_MOUSEMOTION:
3310  if (cursor_hidden) {
3311  SDL_ShowCursor(1);
3312  cursor_hidden = 0;
3313  }
3314  cursor_last_shown = av_gettime();
3315  if (event.type == SDL_MOUSEBUTTONDOWN) {
3316  x = event.button.x;
3317  } else {
3318  if (event.motion.state != SDL_PRESSED)
3319  break;
3320  x = event.motion.x;
3321  }
3322  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3323  uint64_t size = avio_size(cur_stream->ic->pb);
3324  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3325  } else {
3326  int64_t ts;
3327  int ns, hh, mm, ss;
3328  int tns, thh, tmm, tss;
3329  tns = cur_stream->ic->duration / 1000000LL;
3330  thh = tns / 3600;
3331  tmm = (tns % 3600) / 60;
3332  tss = (tns % 60);
3333  frac = x / cur_stream->width;
3334  ns = frac * tns;
3335  hh = ns / 3600;
3336  mm = (ns % 3600) / 60;
3337  ss = (ns % 60);
3338  av_log(NULL, AV_LOG_INFO,
3339  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3340  hh, mm, ss, thh, tmm, tss);
3341  ts = frac * cur_stream->ic->duration;
3342  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3343  ts += cur_stream->ic->start_time;
3344  stream_seek(cur_stream, ts, 0, 0);
3345  }
3346  break;
3347  case SDL_VIDEORESIZE:
3348  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3349  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3350  if (!screen) {
3351  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3352  do_exit(cur_stream);
3353  }
3354  screen_width = cur_stream->width = screen->w;
3355  screen_height = cur_stream->height = screen->h;
3356  cur_stream->force_refresh = 1;
3357  break;
3358  case SDL_QUIT:
3359  case FF_QUIT_EVENT:
3360  do_exit(cur_stream);
3361  break;
3362  case FF_ALLOC_EVENT:
3363  alloc_picture(event.user.data1);
3364  break;
3365  default:
3366  break;
3367  }
3368  }
3369 }
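/* Seek behaviour implemented above, for reference: left/right move +/-10 s,
   up/down +/-60 s, page up/down +/-600 s (or one chapter when chapters
   exist).  With -bytes the time increment is converted using bit_rate / 8,
   falling back to an assumed 180000 bytes per second, and a mouse click maps
   the clicked fraction of the window width onto the file duration (or the
   file size in byte-seek mode). */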
3370 
3371 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3372 {
3373  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3374  return opt_default(NULL, "video_size", arg);
3375 }
3376 
3377 static int opt_width(void *optctx, const char *opt, const char *arg)
3378 {
3379  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3380  return 0;
3381 }
3382 
3383 static int opt_height(void *optctx, const char *opt, const char *arg)
3384 {
3385  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3386  return 0;
3387 }
3388 
3389 static int opt_format(void *optctx, const char *opt, const char *arg)
3390 {
3391  file_iformat = av_find_input_format(arg);
3392  if (!file_iformat) {
3393  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3394  return AVERROR(EINVAL);
3395  }
3396  return 0;
3397 }
3398 
3399 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3400 {
3401  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3402  return opt_default(NULL, "pixel_format", arg);
3403 }
3404 
3405 static int opt_sync(void *optctx, const char *opt, const char *arg)
3406 {
3407  if (!strcmp(arg, "audio"))
3408  av_sync_type = AV_SYNC_AUDIO_MASTER;
3409  else if (!strcmp(arg, "video"))
3410  av_sync_type = AV_SYNC_VIDEO_MASTER;
3411  else if (!strcmp(arg, "ext"))
3412  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3413  else {
3414  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3415  exit(1);
3416  }
3417  return 0;
3418 }
3419 
3420 static int opt_seek(void *optctx, const char *opt, const char *arg)
3421 {
3422  start_time = parse_time_or_die(opt, arg, 1);
3423  return 0;
3424 }
3425 
3426 static int opt_duration(void *optctx, const char *opt, const char *arg)
3427 {
3428  duration = parse_time_or_die(opt, arg, 1);
3429  return 0;
3430 }
3431 
3432 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3433 {
3434  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3435  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3436  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3437  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3438  return 0;
3439 }
3440 
3441 static void opt_input_file(void *optctx, const char *filename)
3442 {
3443  if (input_filename) {
3444  av_log(NULL, AV_LOG_FATAL,
3445  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3446  filename, input_filename);
3447  exit(1);
3448  }
3449  if (!strcmp(filename, "-"))
3450  filename = "pipe:";
3451  input_filename = filename;
3452 }
3453 
3454 static int opt_codec(void *optctx, const char *opt, const char *arg)
3455 {
3456  const char *spec = strchr(opt, ':');
3457  if (!spec) {
3458  av_log(NULL, AV_LOG_ERROR,
3459  "No media specifier was specified in '%s' in option '%s'\n",
3460  arg, opt);
3461  return AVERROR(EINVAL);
3462  }
3463  spec++;
3464  switch (spec[0]) {
3465  case 'a' : audio_codec_name = arg; break;
3466  case 's' : subtitle_codec_name = arg; break;
3467  case 'v' : video_codec_name = arg; break;
3468  default:
3469  av_log(NULL, AV_LOG_ERROR,
3470  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3471  return AVERROR(EINVAL);
3472  }
3473  return 0;
3474 }
3475 
3476 static int dummy;
3477 
3478 static const OptionDef options[] = {
3479 #include "cmdutils_common_opts.h"
3480  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3481  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3482  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3483  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3484  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3485  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3486  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3487  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3488  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3489  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3490  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3491  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3492  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3493  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3494  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3495  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3496  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3497  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3498  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3499  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3500  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3501  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3502  { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3503  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3504  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3505  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3506  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3507  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3508  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3509  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3510  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3511 #if CONFIG_AVFILTER
3512  { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3513  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3514 #endif
3515  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3516  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3517  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3518  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3519  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3520  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3521  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3522  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3523  { NULL, },
3524 };
3525 
3526 static void show_usage(void)
3527 {
3528  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3529  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3530  av_log(NULL, AV_LOG_INFO, "\n");
3531 }
3532 
3533 void show_help_default(const char *opt, const char *arg)
3534 {
3535  av_log_set_callback(log_callback_help);
3536  show_usage();
3537  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3538  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3539  printf("\n");
3540  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3541  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3542 #if !CONFIG_AVFILTER
3543  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3544 #else
3545  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3546 #endif
3547  printf("\nWhile playing:\n"
3548  "q, ESC quit\n"
3549  "f toggle full screen\n"
3550  "p, SPC pause\n"
3551  "a cycle audio channel in the current program\n"
3552  "v cycle video channel\n"
3553  "t cycle subtitle channel in the current program\n"
3554  "c cycle program\n"
3555  "w show audio waves\n"
3556  "s activate frame-step mode\n"
3557  "left/right seek backward/forward 10 seconds\n"
3558  "down/up seek backward/forward 1 minute\n"
3559  "page down/page up seek backward/forward 10 minutes\n"
3560  "mouse click seek to percentage in file corresponding to fraction of width\n"
3561  );
3562 }
3563 
3564 static int lockmgr(void **mtx, enum AVLockOp op)
3565 {
3566  switch(op) {
3567  case AV_LOCK_CREATE:
3568  *mtx = SDL_CreateMutex();
3569  if(!*mtx)
3570  return 1;
3571  return 0;
3572  case AV_LOCK_OBTAIN:
3573  return !!SDL_LockMutex(*mtx);
3574  case AV_LOCK_RELEASE:
3575  return !!SDL_UnlockMutex(*mtx);
3576  case AV_LOCK_DESTROY:
3577  SDL_DestroyMutex(*mtx);
3578  return 0;
3579  }
3580  return 1;
3581 }
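/* lockmgr() backs each AVLockOp with one SDL mutex; it is handed to
   av_lockmgr_register() in main() below so that libavcodec can serialize
   access to codecs that are not thread safe. */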
3582 
3583 /* Called from the main */
3584 int main(int argc, char **argv)
3585 {
3586  int flags;
3587  VideoState *is;
3588  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3589 
3590  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3591  parse_loglevel(argc, argv, options);
3592 
3593  /* register all codecs, demux and protocols */
3594 #if CONFIG_AVDEVICE
3595  avdevice_register_all();
3596 #endif
3597 #if CONFIG_AVFILTER
3598  avfilter_register_all();
3599 #endif
3600  av_register_all();
3601  avformat_network_init();
3602 
3603  init_opts();
3604 
3605  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3606  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3607 
3608  show_banner(argc, argv, options);
3609 
3610  parse_options(NULL, argc, argv, options, opt_input_file);
3611 
3612  if (!input_filename) {
3613  show_usage();
3614  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3615  av_log(NULL, AV_LOG_FATAL,
3616  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3617  exit(1);
3618  }
3619 
3620  if (display_disable) {
3621  video_disable = 1;
3622  }
3623  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3624  if (audio_disable)
3625  flags &= ~SDL_INIT_AUDIO;
3626  if (display_disable)
3627  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3628 #if !defined(__MINGW32__) && !defined(__APPLE__)
3629  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3630 #endif
3631  if (SDL_Init (flags)) {
3632  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3633  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3634  exit(1);
3635  }
3636 
3637  if (!display_disable) {
3638  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3639  fs_screen_width = vi->current_w;
3640  fs_screen_height = vi->current_h;
3641  }
3642 
3643  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3644  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3645  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3646 
3647  if (av_lockmgr_register(lockmgr)) {
3648  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3649  do_exit(NULL);
3650  }
3651 
3652  av_init_packet(&flush_pkt);
3653  flush_pkt.data = (uint8_t *)&flush_pkt;
3654 
3655  is = stream_open(input_filename, file_iformat);
3656  if (!is) {
3657  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3658  do_exit(NULL);
3659  }
3660 
3661  event_loop(is);
3662 
3663  /* never returns */
3664 
3665  return 0;
3666 }