FFmpeg
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
ffplay.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
47 
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 5
68 
69 /* SDL audio buffer size, in samples. Should be small to have precise
70  A/V sync as SDL does not have hardware buffer fullness info. */
71 #define SDL_AUDIO_BUFFER_SIZE 1024
72 
73 /* no AV sync correction is done if below the AV sync threshold */
74 #define AV_SYNC_THRESHOLD 0.01
75 /* no AV correction is done if too big error */
76 #define AV_NOSYNC_THRESHOLD 10.0
77 
78 /* maximum audio speed change to get correct sync */
79 #define SAMPLE_CORRECTION_PERCENT_MAX 10
80 
81 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
82 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
83 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
84 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
85 
86 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
87 #define AUDIO_DIFF_AVG_NB 20
88 
89 /* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
90 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
91 #define SAMPLE_ARRAY_SIZE (8 * 65536)
92 
93 #define CURSOR_HIDE_DELAY 1000000
94 
95 static int64_t sws_flags = SWS_BICUBIC;
96 
97 typedef struct MyAVPacketList {
100  int serial;
102 
103 typedef struct PacketQueue {
106  int size;
108  int serial;
109  SDL_mutex *mutex;
110  SDL_cond *cond;
111 } PacketQueue;
112 
113 #define VIDEO_PICTURE_QUEUE_SIZE 4
114 #define SUBPICTURE_QUEUE_SIZE 4
115 
116 typedef struct VideoPicture {
117  double pts; // presentation timestamp for this picture
118  int64_t pos; // byte position in file
119  int skip;
120  SDL_Overlay *bmp;
121  int width, height; /* source height & width */
125  int serial;
126 
127 #if CONFIG_AVFILTER
128  AVFilterBufferRef *picref;
129 #endif
130 } VideoPicture;
131 
132 typedef struct SubPicture {
133  double pts; /* presentation time stamp for this picture */
135 } SubPicture;
136 
137 typedef struct AudioParams {
138  int freq;
139  int channels;
142 } AudioParams;
143 
144 enum {
145  AV_SYNC_AUDIO_MASTER, /* default choice */
147  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
148 };
149 
150 typedef struct VideoState {
151  SDL_Thread *read_tid;
152  SDL_Thread *video_tid;
153  SDL_Thread *refresh_tid;
158  int paused;
161  int seek_req;
163  int64_t seek_pos;
164  int64_t seek_rel;
167  int realtime;
168 
170 
172  double external_clock; ///< external clock base
173  double external_clock_drift; ///< external clock base - time (av_gettime) at which we updated external_clock
174  int64_t external_clock_time; ///< last reference time
175  double external_clock_speed; ///< speed of the external clock
176 
177  double audio_clock;
178  double audio_diff_cum; /* used for AV difference average computation */
185  DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
189  unsigned int audio_buf_size; /* in bytes */
190  int audio_buf_index; /* in bytes */
203 
204  enum ShowMode {
206  } show_mode;
213  int xpos;
214 
215  SDL_Thread *subtitle_tid;
222  SDL_mutex *subpq_mutex;
223  SDL_cond *subpq_cond;
224 
225  double frame_timer;
232  double video_clock; // pts of last decoded frame / predicted pts of next decoded frame
236  double video_current_pts; // current displayed pts (different from video_clock if frame fifos are used)
237  double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
238  int64_t video_current_pos; // current displayed file pos
239  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
242  SDL_mutex *pictq_mutex;
243  SDL_cond *pictq_cond;
244 #if !CONFIG_AVFILTER
246 #endif
248 
249  char filename[1024];
251  int step;
252 
253 #if CONFIG_AVFILTER
254  AVFilterContext *in_video_filter; // the first filter in the video chain
255  AVFilterContext *out_video_filter; // the last filter in the video chain
256  int use_dr1;
257  FrameBuffer *buffer_pool;
258 #endif
259 
260  int refresh;
262 
264 } VideoState;
265 
266 /* options specified by the user */
268 static const char *input_filename;
269 static const char *window_title;
270 static int fs_screen_width;
271 static int fs_screen_height;
272 static int screen_width = 0;
273 static int screen_height = 0;
274 static int audio_disable;
275 static int video_disable;
277  [AVMEDIA_TYPE_AUDIO] = -1,
278  [AVMEDIA_TYPE_VIDEO] = -1,
279  [AVMEDIA_TYPE_SUBTITLE] = -1,
280 };
281 static int seek_by_bytes = -1;
282 static int display_disable;
283 static int show_status = 1;
285 static int64_t start_time = AV_NOPTS_VALUE;
286 static int64_t duration = AV_NOPTS_VALUE;
287 static int workaround_bugs = 1;
288 static int fast = 0;
289 static int genpts = 0;
290 static int lowres = 0;
291 static int idct = FF_IDCT_AUTO;
295 static int error_concealment = 3;
296 static int decoder_reorder_pts = -1;
297 static int autoexit;
298 static int exit_on_keydown;
299 static int exit_on_mousedown;
300 static int loop = 1;
301 static int framedrop = -1;
302 static int infinite_buffer = -1;
303 static enum ShowMode show_mode = SHOW_MODE_NONE;
304 static const char *audio_codec_name;
305 static const char *subtitle_codec_name;
306 static const char *video_codec_name;
307 static int rdftspeed = 20;
308 static int64_t cursor_last_shown;
309 static int cursor_hidden = 0;
310 #if CONFIG_AVFILTER
311 static char *vfilters = NULL;
312 #endif
313 
314 /* current context */
315 static int is_full_screen;
316 static int64_t audio_callback_time;
317 
319 
320 #define FF_ALLOC_EVENT (SDL_USEREVENT)
321 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
322 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
323 
324 static SDL_Surface *screen;
325 
326 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
327 
329 {
330  MyAVPacketList *pkt1;
331 
332  if (q->abort_request)
333  return -1;
334 
335  pkt1 = av_malloc(sizeof(MyAVPacketList));
336  if (!pkt1)
337  return -1;
338  pkt1->pkt = *pkt;
339  pkt1->next = NULL;
340  if (pkt == &flush_pkt)
341  q->serial++;
342  pkt1->serial = q->serial;
343 
344  if (!q->last_pkt)
345  q->first_pkt = pkt1;
346  else
347  q->last_pkt->next = pkt1;
348  q->last_pkt = pkt1;
349  q->nb_packets++;
350  q->size += pkt1->pkt.size + sizeof(*pkt1);
351  /* XXX: should duplicate packet data in DV case */
352  SDL_CondSignal(q->cond);
353  return 0;
354 }
355 
357 {
358  int ret;
359 
360  /* duplicate the packet */
361  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
362  return -1;
363 
364  SDL_LockMutex(q->mutex);
365  ret = packet_queue_put_private(q, pkt);
366  SDL_UnlockMutex(q->mutex);
367 
368  if (pkt != &flush_pkt && ret < 0)
369  av_free_packet(pkt);
370 
371  return ret;
372 }
373 
374 /* packet queue handling */
376 {
377  memset(q, 0, sizeof(PacketQueue));
378  q->mutex = SDL_CreateMutex();
379  q->cond = SDL_CreateCond();
380  q->abort_request = 1;
381 }
382 
384 {
385  MyAVPacketList *pkt, *pkt1;
386 
387  SDL_LockMutex(q->mutex);
388  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
389  pkt1 = pkt->next;
390  av_free_packet(&pkt->pkt);
391  av_freep(&pkt);
392  }
393  q->last_pkt = NULL;
394  q->first_pkt = NULL;
395  q->nb_packets = 0;
396  q->size = 0;
397  SDL_UnlockMutex(q->mutex);
398 }
399 
401 {
403  SDL_DestroyMutex(q->mutex);
404  SDL_DestroyCond(q->cond);
405 }
406 
408 {
409  SDL_LockMutex(q->mutex);
410 
411  q->abort_request = 1;
412 
413  SDL_CondSignal(q->cond);
414 
415  SDL_UnlockMutex(q->mutex);
416 }
417 
419 {
420  SDL_LockMutex(q->mutex);
421  q->abort_request = 0;
422  packet_queue_put_private(q, &flush_pkt);
423  SDL_UnlockMutex(q->mutex);
424 }
425 
426 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
427 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
428 {
429  MyAVPacketList *pkt1;
430  int ret;
431 
432  SDL_LockMutex(q->mutex);
433 
434  for (;;) {
435  if (q->abort_request) {
436  ret = -1;
437  break;
438  }
439 
440  pkt1 = q->first_pkt;
441  if (pkt1) {
442  q->first_pkt = pkt1->next;
443  if (!q->first_pkt)
444  q->last_pkt = NULL;
445  q->nb_packets--;
446  q->size -= pkt1->pkt.size + sizeof(*pkt1);
447  *pkt = pkt1->pkt;
448  if (serial)
449  *serial = pkt1->serial;
450  av_free(pkt1);
451  ret = 1;
452  break;
453  } else if (!block) {
454  ret = 0;
455  break;
456  } else {
457  SDL_CondWait(q->cond, q->mutex);
458  }
459  }
460  SDL_UnlockMutex(q->mutex);
461  return ret;
462 }
463 
464 static inline void fill_rectangle(SDL_Surface *screen,
465  int x, int y, int w, int h, int color, int update)
466 {
467  SDL_Rect rect;
468  rect.x = x;
469  rect.y = y;
470  rect.w = w;
471  rect.h = h;
472  SDL_FillRect(screen, &rect, color);
473  if (update && w > 0 && h > 0)
474  SDL_UpdateRect(screen, x, y, w, h);
475 }
476 
477 /* draw only the border of a rectangle */
478 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
479 {
480  int w1, w2, h1, h2;
481 
482  /* fill the background */
483  w1 = x;
484  if (w1 < 0)
485  w1 = 0;
486  w2 = width - (x + w);
487  if (w2 < 0)
488  w2 = 0;
489  h1 = y;
490  if (h1 < 0)
491  h1 = 0;
492  h2 = height - (y + h);
493  if (h2 < 0)
494  h2 = 0;
496  xleft, ytop,
497  w1, height,
498  color, update);
500  xleft + width - w2, ytop,
501  w2, height,
502  color, update);
504  xleft + w1, ytop,
505  width - w1 - w2, h1,
506  color, update);
508  xleft + w1, ytop + height - h2,
509  width - w1 - w2, h2,
510  color, update);
511 }
512 
513 #define ALPHA_BLEND(a, oldp, newp, s)\
514 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
515 
516 #define RGBA_IN(r, g, b, a, s)\
517 {\
518  unsigned int v = ((const uint32_t *)(s))[0];\
519  a = (v >> 24) & 0xff;\
520  r = (v >> 16) & 0xff;\
521  g = (v >> 8) & 0xff;\
522  b = v & 0xff;\
523 }
524 
525 #define YUVA_IN(y, u, v, a, s, pal)\
526 {\
527  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
528  a = (val >> 24) & 0xff;\
529  y = (val >> 16) & 0xff;\
530  u = (val >> 8) & 0xff;\
531  v = val & 0xff;\
532 }
533 
534 #define YUVA_OUT(d, y, u, v, a)\
535 {\
536  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
537 }
538 
539 
540 #define BPP 1
541 
542 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
543 {
544  int wrap, wrap3, width2, skip2;
545  int y, u, v, a, u1, v1, a1, w, h;
546  uint8_t *lum, *cb, *cr;
547  const uint8_t *p;
548  const uint32_t *pal;
549  int dstx, dsty, dstw, dsth;
550 
551  dstw = av_clip(rect->w, 0, imgw);
552  dsth = av_clip(rect->h, 0, imgh);
553  dstx = av_clip(rect->x, 0, imgw - dstw);
554  dsty = av_clip(rect->y, 0, imgh - dsth);
555  lum = dst->data[0] + dsty * dst->linesize[0];
556  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
557  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
558 
559  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
560  skip2 = dstx >> 1;
561  wrap = dst->linesize[0];
562  wrap3 = rect->pict.linesize[0];
563  p = rect->pict.data[0];
564  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
565 
566  if (dsty & 1) {
567  lum += dstx;
568  cb += skip2;
569  cr += skip2;
570 
571  if (dstx & 1) {
572  YUVA_IN(y, u, v, a, p, pal);
573  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
575  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
576  cb++;
577  cr++;
578  lum++;
579  p += BPP;
580  }
581  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
582  YUVA_IN(y, u, v, a, p, pal);
583  u1 = u;
584  v1 = v;
585  a1 = a;
586  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
587 
588  YUVA_IN(y, u, v, a, p + BPP, pal);
589  u1 += u;
590  v1 += v;
591  a1 += a;
592  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
593  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
594  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
595  cb++;
596  cr++;
597  p += 2 * BPP;
598  lum += 2;
599  }
600  if (w) {
601  YUVA_IN(y, u, v, a, p, pal);
602  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
603  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
604  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
605  p++;
606  lum++;
607  }
608  p += wrap3 - dstw * BPP;
609  lum += wrap - dstw - dstx;
610  cb += dst->linesize[1] - width2 - skip2;
611  cr += dst->linesize[2] - width2 - skip2;
612  }
613  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
614  lum += dstx;
615  cb += skip2;
616  cr += skip2;
617 
618  if (dstx & 1) {
619  YUVA_IN(y, u, v, a, p, pal);
620  u1 = u;
621  v1 = v;
622  a1 = a;
623  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624  p += wrap3;
625  lum += wrap;
626  YUVA_IN(y, u, v, a, p, pal);
627  u1 += u;
628  v1 += v;
629  a1 += a;
630  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
631  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
632  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
633  cb++;
634  cr++;
635  p += -wrap3 + BPP;
636  lum += -wrap + 1;
637  }
638  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
639  YUVA_IN(y, u, v, a, p, pal);
640  u1 = u;
641  v1 = v;
642  a1 = a;
643  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644 
645  YUVA_IN(y, u, v, a, p + BPP, pal);
646  u1 += u;
647  v1 += v;
648  a1 += a;
649  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
650  p += wrap3;
651  lum += wrap;
652 
653  YUVA_IN(y, u, v, a, p, pal);
654  u1 += u;
655  v1 += v;
656  a1 += a;
657  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658 
659  YUVA_IN(y, u, v, a, p + BPP, pal);
660  u1 += u;
661  v1 += v;
662  a1 += a;
663  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
664 
665  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
666  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
667 
668  cb++;
669  cr++;
670  p += -wrap3 + 2 * BPP;
671  lum += -wrap + 2;
672  }
673  if (w) {
674  YUVA_IN(y, u, v, a, p, pal);
675  u1 = u;
676  v1 = v;
677  a1 = a;
678  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
679  p += wrap3;
680  lum += wrap;
681  YUVA_IN(y, u, v, a, p, pal);
682  u1 += u;
683  v1 += v;
684  a1 += a;
685  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
686  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
687  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
688  cb++;
689  cr++;
690  p += -wrap3 + BPP;
691  lum += -wrap + 1;
692  }
693  p += wrap3 + (wrap3 - dstw * BPP);
694  lum += wrap + (wrap - dstw - dstx);
695  cb += dst->linesize[1] - width2 - skip2;
696  cr += dst->linesize[2] - width2 - skip2;
697  }
698  /* handle odd height */
699  if (h) {
700  lum += dstx;
701  cb += skip2;
702  cr += skip2;
703 
704  if (dstx & 1) {
705  YUVA_IN(y, u, v, a, p, pal);
706  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
707  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
708  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
709  cb++;
710  cr++;
711  lum++;
712  p += BPP;
713  }
714  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
715  YUVA_IN(y, u, v, a, p, pal);
716  u1 = u;
717  v1 = v;
718  a1 = a;
719  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
720 
721  YUVA_IN(y, u, v, a, p + BPP, pal);
722  u1 += u;
723  v1 += v;
724  a1 += a;
725  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
726  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
727  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
728  cb++;
729  cr++;
730  p += 2 * BPP;
731  lum += 2;
732  }
733  if (w) {
734  YUVA_IN(y, u, v, a, p, pal);
735  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
736  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
737  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
738  }
739  }
740 }
741 
743 {
744  avsubtitle_free(&sp->sub);
745 }
746 
747 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
748 {
749  float aspect_ratio;
750  int width, height, x, y;
751 
752  if (vp->sample_aspect_ratio.num == 0)
753  aspect_ratio = 0;
754  else
755  aspect_ratio = av_q2d(vp->sample_aspect_ratio);
756 
757  if (aspect_ratio <= 0.0)
758  aspect_ratio = 1.0;
759  aspect_ratio *= (float)vp->width / (float)vp->height;
760 
761  /* XXX: we suppose the screen has a 1.0 pixel ratio */
762  height = scr_height;
763  width = ((int)rint(height * aspect_ratio)) & ~1;
764  if (width > scr_width) {
765  width = scr_width;
766  height = ((int)rint(width / aspect_ratio)) & ~1;
767  }
768  x = (scr_width - width) / 2;
769  y = (scr_height - height) / 2;
770  rect->x = scr_xleft + x;
771  rect->y = scr_ytop + y;
772  rect->w = FFMAX(width, 1);
773  rect->h = FFMAX(height, 1);
774 }
775 
777 {
778  VideoPicture *vp;
779  SubPicture *sp;
780  AVPicture pict;
781  SDL_Rect rect;
782  int i;
783 
784  vp = &is->pictq[is->pictq_rindex];
785  if (vp->bmp) {
786  if (is->subtitle_st) {
787  if (is->subpq_size > 0) {
788  sp = &is->subpq[is->subpq_rindex];
789 
790  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
791  SDL_LockYUVOverlay (vp->bmp);
792 
793  pict.data[0] = vp->bmp->pixels[0];
794  pict.data[1] = vp->bmp->pixels[2];
795  pict.data[2] = vp->bmp->pixels[1];
796 
797  pict.linesize[0] = vp->bmp->pitches[0];
798  pict.linesize[1] = vp->bmp->pitches[2];
799  pict.linesize[2] = vp->bmp->pitches[1];
800 
801  for (i = 0; i < sp->sub.num_rects; i++)
802  blend_subrect(&pict, sp->sub.rects[i],
803  vp->bmp->w, vp->bmp->h);
804 
805  SDL_UnlockYUVOverlay (vp->bmp);
806  }
807  }
808  }
809 
810  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
811 
812  SDL_DisplayYUVOverlay(vp->bmp, &rect);
813 
814  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
815  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
816  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
817  is->last_display_rect = rect;
818  }
819  }
820 }
821 
/* Canonical (always non-negative) modulo: maps any a into [0, b) for b > 0.
 * Used to wrap ring-buffer indices (e.g. sample_array positions).
 * Fix: the previous form (a < 0 ? a%b + b : a%b) returned b instead of 0
 * when a was a negative exact multiple of b, producing an index one past
 * the end of the buffer. */
static inline int compute_mod(int a, int b)
{
    int m = a % b;
    return m < 0 ? m + b : m;
}
826 
828 {
829  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
830  int ch, channels, h, h2, bgcolor, fgcolor;
831  int64_t time_diff;
832  int rdft_bits, nb_freq;
833 
834  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
835  ;
836  nb_freq = 1 << (rdft_bits - 1);
837 
838  /* compute display index : center on currently output samples */
839  channels = s->audio_tgt.channels;
840  nb_display_channels = channels;
841  if (!s->paused) {
842  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
843  n = 2 * channels;
844  delay = s->audio_write_buf_size;
845  delay /= n;
846 
847  /* to be more precise, we take into account the time spent since
848  the last buffer computation */
849  if (audio_callback_time) {
850  time_diff = av_gettime() - audio_callback_time;
851  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
852  }
853 
854  delay += 2 * data_used;
855  if (delay < data_used)
856  delay = data_used;
857 
858  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
859  if (s->show_mode == SHOW_MODE_WAVES) {
860  h = INT_MIN;
861  for (i = 0; i < 1000; i += channels) {
862  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
863  int a = s->sample_array[idx];
864  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
865  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
866  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
867  int score = a - d;
868  if (h < score && (b ^ c) < 0) {
869  h = score;
870  i_start = idx;
871  }
872  }
873  }
874 
875  s->last_i_start = i_start;
876  } else {
877  i_start = s->last_i_start;
878  }
879 
880  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
881  if (s->show_mode == SHOW_MODE_WAVES) {
883  s->xleft, s->ytop, s->width, s->height,
884  bgcolor, 0);
885 
886  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
887 
888  /* total height for one channel */
889  h = s->height / nb_display_channels;
890  /* graph height / 2 */
891  h2 = (h * 9) / 20;
892  for (ch = 0; ch < nb_display_channels; ch++) {
893  i = i_start + ch;
894  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
895  for (x = 0; x < s->width; x++) {
896  y = (s->sample_array[i] * h2) >> 15;
897  if (y < 0) {
898  y = -y;
899  ys = y1 - y;
900  } else {
901  ys = y1;
902  }
904  s->xleft + x, ys, 1, y,
905  fgcolor, 0);
906  i += channels;
907  if (i >= SAMPLE_ARRAY_SIZE)
908  i -= SAMPLE_ARRAY_SIZE;
909  }
910  }
911 
912  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
913 
914  for (ch = 1; ch < nb_display_channels; ch++) {
915  y = s->ytop + ch * h;
917  s->xleft, y, s->width, 1,
918  fgcolor, 0);
919  }
920  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
921  } else {
922  nb_display_channels= FFMIN(nb_display_channels, 2);
923  if (rdft_bits != s->rdft_bits) {
924  av_rdft_end(s->rdft);
925  av_free(s->rdft_data);
926  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
927  s->rdft_bits = rdft_bits;
928  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
929  }
930  {
931  FFTSample *data[2];
932  for (ch = 0; ch < nb_display_channels; ch++) {
933  data[ch] = s->rdft_data + 2 * nb_freq * ch;
934  i = i_start + ch;
935  for (x = 0; x < 2 * nb_freq; x++) {
936  double w = (x-nb_freq) * (1.0 / nb_freq);
937  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
938  i += channels;
939  if (i >= SAMPLE_ARRAY_SIZE)
940  i -= SAMPLE_ARRAY_SIZE;
941  }
942  av_rdft_calc(s->rdft, data[ch]);
943  }
944  // least efficient way to do this, we should of course directly access it but its more than fast enough
945  for (y = 0; y < s->height; y++) {
946  double w = 1 / sqrt(nb_freq);
947  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
948  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
949  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
950  a = FFMIN(a, 255);
951  b = FFMIN(b, 255);
952  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
953 
955  s->xpos, s->height-y, 1, 1,
956  fgcolor, 0);
957  }
958  }
959  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
960  if (!s->paused)
961  s->xpos++;
962  if (s->xpos >= s->width)
963  s->xpos= s->xleft;
964  }
965 }
966 
967 static void stream_close(VideoState *is)
968 {
969  VideoPicture *vp;
970  int i;
971  /* XXX: use a special url_shutdown call to abort parse cleanly */
972  is->abort_request = 1;
973  SDL_WaitThread(is->read_tid, NULL);
974  SDL_WaitThread(is->refresh_tid, NULL);
978 
979  /* free all pictures */
980  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
981  vp = &is->pictq[i];
982 #if CONFIG_AVFILTER
983  avfilter_unref_bufferp(&vp->picref);
984 #endif
985  if (vp->bmp) {
986  SDL_FreeYUVOverlay(vp->bmp);
987  vp->bmp = NULL;
988  }
989  }
990  SDL_DestroyMutex(is->pictq_mutex);
991  SDL_DestroyCond(is->pictq_cond);
992  SDL_DestroyMutex(is->subpq_mutex);
993  SDL_DestroyCond(is->subpq_cond);
994  SDL_DestroyCond(is->continue_read_thread);
995 #if !CONFIG_AVFILTER
996  if (is->img_convert_ctx)
998 #endif
999  av_free(is);
1000 }
1001 
1002 static void do_exit(VideoState *is)
1003 {
1004  if (is) {
1005  stream_close(is);
1006  }
1008  uninit_opts();
1009 #if CONFIG_AVFILTER
1010  avfilter_uninit();
1011  av_freep(&vfilters);
1012 #endif
1014  if (show_status)
1015  printf("\n");
1016  SDL_Quit();
1017  av_log(NULL, AV_LOG_QUIET, "%s", "");
1018  exit(0);
1019 }
1020 
/* Signal handler (SIGINT/SIGTERM): terminate immediately with a
 * distinctive exit status.
 * NOTE(review): exit() is not async-signal-safe (it runs atexit handlers);
 * kept as-is for compatibility with existing behavior — confirm before
 * switching to _exit(). */
static void sigterm_handler(int sig)
{
    (void)sig;  /* unused: one handler serves every signal we register */
    exit(123);
}
1025 
1026 static int video_open(VideoState *is, int force_set_video_mode)
1027 {
1028  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1029  int w,h;
1030  VideoPicture *vp = &is->pictq[is->pictq_rindex];
1031  SDL_Rect rect;
1032 
1033  if (is_full_screen) flags |= SDL_FULLSCREEN;
1034  else flags |= SDL_RESIZABLE;
1035 
1037  w = fs_screen_width;
1038  h = fs_screen_height;
1039  } else if (!is_full_screen && screen_width) {
1040  w = screen_width;
1041  h = screen_height;
1042  } else if (vp->width) {
1043  calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1044  w = rect.w;
1045  h = rect.h;
1046  } else {
1047  w = 640;
1048  h = 480;
1049  }
1050  if (screen && is->width == screen->w && screen->w == w
1051  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1052  return 0;
1053  screen = SDL_SetVideoMode(w, h, 0, flags);
1054  if (!screen) {
1055  fprintf(stderr, "SDL: could not set video mode - exiting\n");
1056  do_exit(is);
1057  }
1058  if (!window_title)
1060  SDL_WM_SetCaption(window_title, window_title);
1061 
1062  is->width = screen->w;
1063  is->height = screen->h;
1064 
1065  return 0;
1066 }
1067 
1068 /* display the current picture, if any */
1069 static void video_display(VideoState *is)
1070 {
1071  if (!screen)
1072  video_open(is, 0);
1073  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1074  video_audio_display(is);
1075  else if (is->video_st)
1076  video_image_display(is);
1077 }
1078 
1079 static int refresh_thread(void *opaque)
1080 {
1081  VideoState *is= opaque;
1082  while (!is->abort_request) {
1083  SDL_Event event;
1084  event.type = FF_REFRESH_EVENT;
1085  event.user.data1 = opaque;
1086  if (!is->refresh && (!is->paused || is->force_refresh)) {
1087  is->refresh = 1;
1088  SDL_PushEvent(&event);
1089  }
1090  //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1091  av_usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
1092  }
1093  return 0;
1094 }
1095 
1096 /* get the current audio clock value */
1097 static double get_audio_clock(VideoState *is)
1098 {
1099  if (is->paused) {
1100  return is->audio_current_pts;
1101  } else {
1102  return is->audio_current_pts_drift + av_gettime() / 1000000.0;
1103  }
1104 }
1105 
1106 /* get the current video clock value */
1107 static double get_video_clock(VideoState *is)
1108 {
1109  if (is->paused) {
1110  return is->video_current_pts;
1111  } else {
1112  return is->video_current_pts_drift + av_gettime() / 1000000.0;
1113  }
1114 }
1115 
1116 /* get the current external clock value */
1117 static double get_external_clock(VideoState *is)
1118 {
1119  if (is->paused) {
1120  return is->external_clock;
1121  } else {
1122  double time = av_gettime() / 1000000.0;
1123  return is->external_clock_drift + time - (time - is->external_clock_time / 1000000.0) * (1.0 - is->external_clock_speed);
1124  }
1125 }
1126 
1128  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1129  if (is->video_st)
1130  return AV_SYNC_VIDEO_MASTER;
1131  else
1132  return AV_SYNC_AUDIO_MASTER;
1133  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1134  if (is->audio_st)
1135  return AV_SYNC_AUDIO_MASTER;
1136  else
1137  return AV_SYNC_EXTERNAL_CLOCK;
1138  } else {
1139  return AV_SYNC_EXTERNAL_CLOCK;
1140  }
1141 }
1142 
1143 /* get the current master clock value */
1144 static double get_master_clock(VideoState *is)
1145 {
1146  double val;
1147 
1148  switch (get_master_sync_type(is)) {
1149  case AV_SYNC_VIDEO_MASTER:
1150  val = get_video_clock(is);
1151  break;
1152  case AV_SYNC_AUDIO_MASTER:
1153  val = get_audio_clock(is);
1154  break;
1155  default:
1156  val = get_external_clock(is);
1157  break;
1158  }
1159  return val;
1160 }
1161 
1162 static void update_external_clock_pts(VideoState *is, double pts)
1163 {
1165  is->external_clock = pts;
1166  is->external_clock_drift = pts - is->external_clock_time / 1000000.0;
1167 }
1168 
1169 static void check_external_clock_sync(VideoState *is, double pts) {
1170  if (fabs(get_external_clock(is) - pts) > AV_NOSYNC_THRESHOLD) {
1171  update_external_clock_pts(is, pts);
1172  }
1173 }
1174 
1175 static void update_external_clock_speed(VideoState *is, double speed) {
1177  is->external_clock_speed = speed;
1178 }
1179 
1181  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1182  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1184  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1185  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1187  } else {
1188  double speed = is->external_clock_speed;
1189  if (speed != 1.0)
1190  update_external_clock_speed(is, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1191  }
1192 }
1193 
1194 /* seek in the stream */
1195 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1196 {
1197  if (!is->seek_req) {
1198  is->seek_pos = pos;
1199  is->seek_rel = rel;
1200  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1201  if (seek_by_bytes)
1203  is->seek_req = 1;
1204  }
1205 }
1206 
1207 /* pause or resume the video */
1209 {
1210  if (is->paused) {
1211  is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1212  if (is->read_pause_return != AVERROR(ENOSYS)) {
1213  is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1214  }
1215  is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1216  }
1218  is->paused = !is->paused;
1219 }
1220 
1221 static double compute_target_delay(double delay, VideoState *is)
1222 {
1223  double sync_threshold, diff;
1224 
1225  /* update delay to follow master synchronisation source */
1227  /* if video is slave, we try to correct big delays by
1228  duplicating or deleting a frame */
1229  diff = get_video_clock(is) - get_master_clock(is);
1230 
1231  /* skip or repeat frame. We take into account the
1232  delay to compute the threshold. I still don't know
1233  if it is the best guess */
1234  sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1235  if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1236  if (diff <= -sync_threshold)
1237  delay = 0;
1238  else if (diff >= sync_threshold)
1239  delay = 2 * delay;
1240  }
1241  }
1242 
1243  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1244  delay, -diff);
1245 
1246  return delay;
1247 }
1248 
1249 static void pictq_next_picture(VideoState *is) {
1250  /* update queue size and signal for next picture */
1252  is->pictq_rindex = 0;
1253 
1254  SDL_LockMutex(is->pictq_mutex);
1255  is->pictq_size--;
1256  SDL_CondSignal(is->pictq_cond);
1257  SDL_UnlockMutex(is->pictq_mutex);
1258 }
1259 
1260 static void pictq_prev_picture(VideoState *is) {
1261  VideoPicture *prevvp;
1262  /* update queue size and signal for the previous picture */
1264  if (prevvp->allocated && !prevvp->skip) {
1265  SDL_LockMutex(is->pictq_mutex);
1266  if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1267  if (--is->pictq_rindex == -1)
1269  is->pictq_size++;
1270  }
1271  SDL_CondSignal(is->pictq_cond);
1272  SDL_UnlockMutex(is->pictq_mutex);
1273  }
1274 }
1275 
1276 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1277  double time = av_gettime() / 1000000.0;
1278  /* update current video pts */
1279  is->video_current_pts = pts;
1280  is->video_current_pts_drift = is->video_current_pts - time;
1281  is->video_current_pos = pos;
1282  is->frame_last_pts = pts;
1283  if (is->videoq.serial == serial)
1285 }
1286 
/* called to display each frame */
/* Drives video/subtitle presentation: picks the next queued picture whose
 * display time has arrived, drops late frames, expires subtitles, and
 * periodically prints the status line.  Runs on the event-loop thread. */
static void video_refresh(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double time;

    SubPicture *sp, *sp2;

    /* NOTE(review): the body of this if appears to have been lost in
     * extraction (a gap in the original numbering follows); as written the
     * next if-statement becomes its body — confirm against upstream. */
    if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)

    /* audio-only visualization (waves/RDFT) when no video is shown */
    if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st)
        video_display(is);

    if (is->video_st) {
        if (is->force_refresh)
            pictq_prev_picture(is);   /* redisplay the last shown picture */
retry:
        if (is->pictq_size == 0) {
            SDL_LockMutex(is->pictq_mutex);
            /* NOTE(review): lines missing here (gap in original numbering);
             * the brace below closed a dropped inner if. */
            }
            SDL_UnlockMutex(is->pictq_mutex);
            // nothing to do, no picture to display in the queue
        } else {
            double last_duration, duration, delay;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            if (vp->skip) {
                pictq_next_picture(is);
                goto retry;
            }

            if (is->paused)
                goto display;

            /* compute nominal last_duration */
            last_duration = vp->pts - is->frame_last_pts;
            if (last_duration > 0 && last_duration < is->max_frame_duration) {
                /* if duration of the last frame was sane, update last_duration in video state */
                is->frame_last_duration = last_duration;
            }
            delay = compute_target_delay(is->frame_last_duration, is);

            time= av_gettime()/1000000.0;
            /* not yet time for this frame: come back later */
            if (time < is->frame_timer + delay)
                return;

            /* catch frame_timer up in whole multiples of delay */
            if (delay > 0)
                is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));

            SDL_LockMutex(is->pictq_mutex);
            update_video_pts(is, vp->pts, vp->pos, vp->serial);
            SDL_UnlockMutex(is->pictq_mutex);

            /* drop this frame if the next one is already due */
            if (is->pictq_size > 1) {
                VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
                duration = nextvp->pts - vp->pts;
                if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
                    is->frame_drops_late++;
                    pictq_next_picture(is);
                    goto retry;
                }
            }

            if (is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: flush every queued subtitle */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subtitle once it expired or the next one started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

display:
            /* display picture */
            if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
                video_display(is);

            pictq_next_picture(is);

            /* in single-step mode pause again after each displayed frame */
            if (is->step && !is->paused)
                stream_toggle_pause(is);
        }
    }
    is->force_refresh = 0;
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            /* NOTE(review): some printf argument lines are absent (gaps in the
             * original numbering) — the call below is unterminated as shown;
             * restore from upstream before building. */
            printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
                   get_master_clock(is),
                   av_diff,
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1446 
1447 /* allocate a picture (needs to do that in main thread to avoid
1448  potential locking problems */
1449 static void alloc_picture(VideoState *is)
1450 {
1451  VideoPicture *vp;
1452 
1453  vp = &is->pictq[is->pictq_windex];
1454 
1455  if (vp->bmp)
1456  SDL_FreeYUVOverlay(vp->bmp);
1457 
1458 #if CONFIG_AVFILTER
1459  avfilter_unref_bufferp(&vp->picref);
1460 #endif
1461 
1462  video_open(is, 0);
1463 
1464  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1465  SDL_YV12_OVERLAY,
1466  screen);
1467  if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1468  /* SDL allocates a buffer smaller than requested if the video
1469  * overlay hardware is unable to support the requested size. */
1470  fprintf(stderr, "Error: the video system does not support an image\n"
1471  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1472  "to reduce the image size.\n", vp->width, vp->height );
1473  do_exit(is);
1474  }
1475 
1476  SDL_LockMutex(is->pictq_mutex);
1477  vp->allocated = 1;
1478  SDL_CondSignal(is->pictq_cond);
1479  SDL_UnlockMutex(is->pictq_mutex);
1480 }
1481 
1482 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1483  int i, width, height;
1484  Uint8 *p, *maxp;
1485  for (i = 0; i < 3; i++) {
1486  width = bmp->w;
1487  height = bmp->h;
1488  if (i > 0) {
1489  width >>= 1;
1490  height >>= 1;
1491  }
1492  if (bmp->pitches[i] > width) {
1493  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1494  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1495  *(p+1) = *p;
1496  }
1497  }
1498 }
1499 
1500 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos, int serial)
1501 {
1502  VideoPicture *vp;
1503  double frame_delay, pts = pts1;
1504 
1505  /* compute the exact PTS for the picture if it is omitted in the stream
1506  * pts1 is the dts of the pkt / pts of the frame */
1507  if (pts != 0) {
1508  /* update video clock with pts, if present */
1509  is->video_clock = pts;
1510  } else {
1511  pts = is->video_clock;
1512  }
1513  /* update video clock for next frame */
1514  frame_delay = av_q2d(is->video_st->codec->time_base);
1515  /* for MPEG2, the frame can be repeated, so we update the
1516  clock accordingly */
1517  frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1518  is->video_clock += frame_delay;
1519 
1520 #if defined(DEBUG_SYNC) && 0
1521  printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1522  av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1523 #endif
1524 
1525  /* wait until we have space to put a new picture */
1526  SDL_LockMutex(is->pictq_mutex);
1527 
1528  /* keep the last already displayed picture in the queue */
1529  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 2 &&
1530  !is->videoq.abort_request) {
1531  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1532  }
1533  SDL_UnlockMutex(is->pictq_mutex);
1534 
1535  if (is->videoq.abort_request)
1536  return -1;
1537 
1538  vp = &is->pictq[is->pictq_windex];
1539 
1540 #if CONFIG_AVFILTER
1541  vp->sample_aspect_ratio = ((AVFilterBufferRef *)src_frame->opaque)->video->sample_aspect_ratio;
1542 #else
1543  vp->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, src_frame);
1544 #endif
1545 
1546  /* alloc or resize hardware picture buffer */
1547  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1548  vp->width != src_frame->width ||
1549  vp->height != src_frame->height) {
1550  SDL_Event event;
1551 
1552  vp->allocated = 0;
1553  vp->reallocate = 0;
1554  vp->width = src_frame->width;
1555  vp->height = src_frame->height;
1556 
1557  /* the allocation must be done in the main thread to avoid
1558  locking problems. */
1559  event.type = FF_ALLOC_EVENT;
1560  event.user.data1 = is;
1561  SDL_PushEvent(&event);
1562 
1563  /* wait until the picture is allocated */
1564  SDL_LockMutex(is->pictq_mutex);
1565  while (!vp->allocated && !is->videoq.abort_request) {
1566  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1567  }
1568  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1569  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1570  while (!vp->allocated) {
1571  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1572  }
1573  }
1574  SDL_UnlockMutex(is->pictq_mutex);
1575 
1576  if (is->videoq.abort_request)
1577  return -1;
1578  }
1579 
1580  /* if the frame is not skipped, then display it */
1581  if (vp->bmp) {
1582  AVPicture pict = { { 0 } };
1583 #if CONFIG_AVFILTER
1584  avfilter_unref_bufferp(&vp->picref);
1585  vp->picref = src_frame->opaque;
1586 #endif
1587 
1588  /* get a pointer on the bitmap */
1589  SDL_LockYUVOverlay (vp->bmp);
1590 
1591  pict.data[0] = vp->bmp->pixels[0];
1592  pict.data[1] = vp->bmp->pixels[2];
1593  pict.data[2] = vp->bmp->pixels[1];
1594 
1595  pict.linesize[0] = vp->bmp->pitches[0];
1596  pict.linesize[1] = vp->bmp->pitches[2];
1597  pict.linesize[2] = vp->bmp->pitches[1];
1598 
1599 #if CONFIG_AVFILTER
1600  // FIXME use direct rendering
1601  av_picture_copy(&pict, (AVPicture *)src_frame,
1602  src_frame->format, vp->width, vp->height);
1603 #else
1604  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1606  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1608  if (is->img_convert_ctx == NULL) {
1609  fprintf(stderr, "Cannot initialize the conversion context\n");
1610  exit(1);
1611  }
1612  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1613  0, vp->height, pict.data, pict.linesize);
1614 #endif
1615  /* workaround SDL PITCH_WORKAROUND */
1617  /* update the bitmap content */
1618  SDL_UnlockYUVOverlay(vp->bmp);
1619 
1620  vp->pts = pts;
1621  vp->pos = pos;
1622  vp->skip = 0;
1623  vp->serial = serial;
1624 
1625  /* now we can update the picture count */
1627  is->pictq_windex = 0;
1628  SDL_LockMutex(is->pictq_mutex);
1629  is->pictq_size++;
1630  SDL_UnlockMutex(is->pictq_mutex);
1631  }
1632  return 0;
1633 }
1634 
1635 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
1636 {
1637  int got_picture, i;
1638 
1639  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1640  return -1;
1641 
1642  if (pkt->data == flush_pkt.data) {
1644 
1645  SDL_LockMutex(is->pictq_mutex);
1646  // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1647  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1648  is->pictq[i].skip = 1;
1649  }
1650  while (is->pictq_size && !is->videoq.abort_request) {
1651  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1652  }
1653  is->video_current_pos = -1;
1655  is->frame_last_duration = 0;
1656  is->frame_timer = (double)av_gettime() / 1000000.0;
1658  SDL_UnlockMutex(is->pictq_mutex);
1659 
1660  return 0;
1661  }
1662 
1663  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1664  return 0;
1665 
1666  if (got_picture) {
1667  int ret = 1;
1668 
1669  if (decoder_reorder_pts == -1) {
1670  *pts = av_frame_get_best_effort_timestamp(frame);
1671  } else if (decoder_reorder_pts) {
1672  *pts = frame->pkt_pts;
1673  } else {
1674  *pts = frame->pkt_dts;
1675  }
1676 
1677  if (*pts == AV_NOPTS_VALUE) {
1678  *pts = 0;
1679  }
1680 
1682  SDL_LockMutex(is->pictq_mutex);
1683  if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
1684  double clockdiff = get_video_clock(is) - get_master_clock(is);
1685  double dpts = av_q2d(is->video_st->time_base) * *pts;
1686  double ptsdiff = dpts - is->frame_last_pts;
1687  if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1688  ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1689  clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
1690  is->frame_last_dropped_pos = pkt->pos;
1691  is->frame_last_dropped_pts = dpts;
1692  is->frame_drops_early++;
1693  ret = 0;
1694  }
1695  }
1696  SDL_UnlockMutex(is->pictq_mutex);
1697  }
1698 
1699  return ret;
1700  }
1701  return 0;
1702 }
1703 
1704 #if CONFIG_AVFILTER
1705 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1706  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1707 {
1708  int ret;
1710 
1711  if (filtergraph) {
1712  outputs = avfilter_inout_alloc();
1713  inputs = avfilter_inout_alloc();
1714  if (!outputs || !inputs) {
1715  ret = AVERROR(ENOMEM);
1716  goto fail;
1717  }
1718 
1719  outputs->name = av_strdup("in");
1720  outputs->filter_ctx = source_ctx;
1721  outputs->pad_idx = 0;
1722  outputs->next = NULL;
1723 
1724  inputs->name = av_strdup("out");
1725  inputs->filter_ctx = sink_ctx;
1726  inputs->pad_idx = 0;
1727  inputs->next = NULL;
1728 
1729  if ((ret = avfilter_graph_parse(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1730  goto fail;
1731  } else {
1732  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1733  goto fail;
1734  }
1735 
1736  ret = avfilter_graph_config(graph, NULL);
1737 fail:
1738  avfilter_inout_free(&outputs);
1739  avfilter_inout_free(&inputs);
1740  return ret;
1741 }
1742 
1743 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1744 {
1745  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1746  char sws_flags_str[128];
1747  char buffersrc_args[256];
1748  int ret;
1749  AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
1750  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1751  AVCodecContext *codec = is->video_st->codec;
1752 
1753  if (!buffersink_params)
1754  return AVERROR(ENOMEM);
1755 
1756  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1757  graph->scale_sws_opts = av_strdup(sws_flags_str);
1758 
1759  snprintf(buffersrc_args, sizeof(buffersrc_args),
1760  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1761  frame->width, frame->height, frame->format,
1763  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1764 
1765  if ((ret = avfilter_graph_create_filter(&filt_src,
1766  avfilter_get_by_name("buffer"),
1767  "ffplay_buffer", buffersrc_args, NULL,
1768  graph)) < 0)
1769  goto fail;
1770 
1771  buffersink_params->pixel_fmts = pix_fmts;
1772  ret = avfilter_graph_create_filter(&filt_out,
1773  avfilter_get_by_name("ffbuffersink"),
1774  "ffplay_buffersink", NULL, buffersink_params, graph);
1775  if (ret < 0)
1776  goto fail;
1777 
1778  /* SDL YUV code is not handling odd width/height for some driver
1779  * combinations, therefore we crop the picture to an even width/height. */
1780  if ((ret = avfilter_graph_create_filter(&filt_crop,
1781  avfilter_get_by_name("crop"),
1782  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1783  goto fail;
1784  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1785  goto fail;
1786 
1787  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1788  goto fail;
1789 
1790  is->in_video_filter = filt_src;
1791  is->out_video_filter = filt_out;
1792 
1793 fail:
1794  av_freep(&buffersink_params);
1795  return ret;
1796 }
1797 
1798 #endif /* CONFIG_AVFILTER */
1799 
/* Video decoder thread: pulls packets, decodes frames, pushes them through
 * the avfilter graph (when enabled) and hands pictures to queue_picture().
 * NOTE(review): several lines were lost in extraction (gaps in the original
 * numbering are marked below); restore from upstream before building. */
static int video_thread(void *arg)
{
    AVPacket pkt = { 0 };
    VideoState *is = arg;
    AVFrame *frame = avcodec_alloc_frame();
    int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
    double pts;
    int ret;
    int serial = 0;

#if CONFIG_AVFILTER
    AVCodecContext *codec = is->video_st->codec;
    /* NOTE(review): the declaration of 'graph' (used below) is missing here. */
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;

    /* enable direct rendering via the custom buffer pool when supported */
    if (codec->codec->capabilities & CODEC_CAP_DR1) {
        is->use_dr1 = 1;
        codec->get_buffer = codec_get_buffer;
        /* NOTE(review): line missing here (release_buffer assignment). */
        codec->opaque = &is->buffer_pool;
    }
#endif

    for (;;) {
#if CONFIG_AVFILTER
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* busy-wait (cheaply) while paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);

        /* NOTE(review): line missing above (frame reset call). */
        av_free_packet(&pkt);

        ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
        if (ret < 0)
            goto the_end;

        if (!ret)
            continue;   /* frame dropped or flush handled */

#if CONFIG_AVFILTER
        /* (re)build the filter graph when the frame geometry changes */
        if ( last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format) {
            av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
                   last_w, last_h, frame->width, frame->height);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                av_free_packet(&pkt);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
        }

        frame->pts = pts_int;
        frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
        if (is->use_dr1 && frame->opaque) {
            FrameBuffer *buf = frame->opaque;
            /* NOTE(review): the head of the buffer-ref creation call is
             * missing here; the following lines are its arguments. */
                             frame->data, frame->linesize,
                             frame->width, frame->height,
                             frame->format);

            avfilter_copy_frame_props(fb, frame);
            fb->buf->priv = buf;
            /* NOTE(review): line missing here (buffer free callback). */

            buf->refcount++;
            /* NOTE(review): line missing here (push of the ref into filt_in). */

        } else
            av_buffersrc_write_frame(filt_in, frame);

        av_free_packet(&pkt);

        /* drain every filtered picture available from the sink */
        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime() / 1000000.0;

            ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
            if (ret < 0) {
                ret = 0;
                break;
            }

            /* NOTE(review): line missing above (filter-delay computation). */
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;

            avfilter_copy_buf_props(frame, picref);

            pts_int = picref->pts;
            tb      = filt_out->inputs[0]->time_base;
            pos     = picref->pos;
            frame->opaque = picref;

            /* rescale filter output timestamps back to the stream time base */
            if (av_cmp_q(tb, is->video_st->time_base)) {
                av_unused int64_t pts1 = pts_int;
                pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
                av_dlog(NULL, "video_thread(): "
                        "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                        tb.num, tb.den, pts1,
                        is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
            }
            pts = pts_int * av_q2d(is->video_st->time_base);
            ret = queue_picture(is, frame, pts, pos, serial);
        }
#else
        pts = pts_int * av_q2d(is->video_st->time_base);
        ret = queue_picture(is, frame, pts, pkt.pos, serial);
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
    /* NOTE(review): line missing here (decoder flush at thread exit). */
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free_packet(&pkt);
    avcodec_free_frame(&frame);
    return 0;
}
1937 
1938 static int subtitle_thread(void *arg)
1939 {
1940  VideoState *is = arg;
1941  SubPicture *sp;
1942  AVPacket pkt1, *pkt = &pkt1;
1943  int got_subtitle;
1944  double pts;
1945  int i, j;
1946  int r, g, b, y, u, v, a;
1947 
1948  for (;;) {
1949  while (is->paused && !is->subtitleq.abort_request) {
1950  SDL_Delay(10);
1951  }
1952  if (packet_queue_get(&is->subtitleq, pkt, 1, NULL) < 0)
1953  break;
1954 
1955  if (pkt->data == flush_pkt.data) {
1957  continue;
1958  }
1959  SDL_LockMutex(is->subpq_mutex);
1960  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1961  !is->subtitleq.abort_request) {
1962  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1963  }
1964  SDL_UnlockMutex(is->subpq_mutex);
1965 
1966  if (is->subtitleq.abort_request)
1967  return 0;
1968 
1969  sp = &is->subpq[is->subpq_windex];
1970 
1971  /* NOTE: ipts is the PTS of the _first_ picture beginning in
1972  this packet, if any */
1973  pts = 0;
1974  if (pkt->pts != AV_NOPTS_VALUE)
1975  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1976 
1978  &got_subtitle, pkt);
1979  if (got_subtitle && sp->sub.format == 0) {
1980  if (sp->sub.pts != AV_NOPTS_VALUE)
1981  pts = sp->sub.pts / (double)AV_TIME_BASE;
1982  sp->pts = pts;
1983 
1984  for (i = 0; i < sp->sub.num_rects; i++)
1985  {
1986  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1987  {
1988  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1989  y = RGB_TO_Y_CCIR(r, g, b);
1990  u = RGB_TO_U_CCIR(r, g, b, 0);
1991  v = RGB_TO_V_CCIR(r, g, b, 0);
1992  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1993  }
1994  }
1995 
1996  /* now we can update the picture count */
1997  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1998  is->subpq_windex = 0;
1999  SDL_LockMutex(is->subpq_mutex);
2000  is->subpq_size++;
2001  SDL_UnlockMutex(is->subpq_mutex);
2002  }
2003  av_free_packet(pkt);
2004  }
2005  return 0;
2006 }
2007 
2008 /* copy samples for viewing in editor window */
2009 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2010 {
2011  int size, len;
2012 
2013  size = samples_size / sizeof(short);
2014  while (size > 0) {
2016  if (len > size)
2017  len = size;
2018  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2019  samples += len;
2020  is->sample_array_index += len;
2022  is->sample_array_index = 0;
2023  size -= len;
2024  }
2025 }
2026 
2027 /* return the wanted number of samples to get better sync if sync_type is video
2028  * or external master clock */
2030 {
2031  int wanted_nb_samples = nb_samples;
2032 
2033  /* if not master, then we try to remove or add samples to correct the clock */
2035  double diff, avg_diff;
2036  int min_nb_samples, max_nb_samples;
2037 
2038  diff = get_audio_clock(is) - get_master_clock(is);
2039 
2040  if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
2041  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2043  /* not enough measures to have a correct estimate */
2044  is->audio_diff_avg_count++;
2045  } else {
2046  /* estimate the A-V difference */
2047  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2048 
2049  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2050  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2051  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2052  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2053  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2054  }
2055  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2056  diff, avg_diff, wanted_nb_samples - nb_samples,
2058  }
2059  } else {
2060  /* too big difference : may be initial PTS errors, so
2061  reset A-V filter */
2062  is->audio_diff_avg_count = 0;
2063  is->audio_diff_cum = 0;
2064  }
2065  }
2066 
2067  return wanted_nb_samples;
2068 }
2069 
/* decode one audio frame and returns its uncompressed size */
/* Decodes audio packets into is->frame, resamples via swresample when the
 * source format differs from the SDL target, and advances is->audio_clock.
 * Returns the size of the (possibly resampled) data placed in is->audio_buf,
 * or a negative value on pause/abort/error.
 * NOTE(review): several lines were lost in extraction (gaps in the original
 * numbering are marked below); restore from upstream before building. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* consumed incrementally */
    AVPacket *pkt = &is->audio_pkt;             /* owns the packet data */
    AVCodecContext *dec = is->audio_st->codec;
    int len1, len2, data_size, resampled_data_size;
    int64_t dec_channel_layout;
    int got_frame;
    double pts;
    int new_packet = 0;
    int flush_complete = 0;
    int wanted_nb_samples;

    for (;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
            if (!is->frame) {
                if (!(is->frame = avcodec_alloc_frame()))
                    return AVERROR(ENOMEM);
            } else
            /* NOTE(review): line missing here (frame reset); the else body
             * was lost and now binds to the following if-statement. */

            if (is->paused)
                return -1;

            if (flush_complete)
                break;
            new_packet = 0;
            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;

            if (!got_frame) {
                /* stop sending empty packets if the decoder is finished */
                if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                    flush_complete = 1;
                continue;
            }
            data_size = av_samples_get_buffer_size(NULL, is->frame->channels,
                                                   is->frame->nb_samples,
                                                   is->frame->format, 1);

            dec_channel_layout =
            /* NOTE(review): lines missing here (channel-layout selection
             * expression); as written the assignment chains onto the next
             * statement. */
            wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);

            /* (re)create the resampler when the source parameters changed */
            if (is->frame->format != is->audio_src.fmt ||
                dec_channel_layout != is->audio_src.channel_layout ||
                is->frame->sample_rate != is->audio_src.freq ||
                (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
                swr_free(&is->swr_ctx);
                /* NOTE(review): the head of the swr_alloc_set_opts() call is
                 * missing here; the following lines are its arguments. */
                    dec_channel_layout, is->frame->format, is->frame->sample_rate,
                    0, NULL);
                if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
                    /* NOTE(review): fprintf argument lines missing below. */
                    fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                    break;
                }
                is->audio_src.channel_layout = dec_channel_layout;
                is->audio_src.channels = is->frame->channels;
                is->audio_src.freq = is->frame->sample_rate;
                is->audio_src.fmt = is->frame->format;
            }

            if (is->swr_ctx) {
                const uint8_t **in = (const uint8_t **)is->frame->extended_data;
                uint8_t *out[] = {is->audio_buf2};
                int out_count = sizeof(is->audio_buf2) / is->audio_tgt.channels / av_get_bytes_per_sample(is->audio_tgt.fmt);
                /* compensate for the sample count requested by the sync code */
                if (wanted_nb_samples != is->frame->nb_samples) {
                    if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
                                             wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
                        fprintf(stderr, "swr_set_compensation() failed\n");
                        break;
                    }
                }
                len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
                if (len2 < 0) {
                    fprintf(stderr, "swr_convert() failed\n");
                    break;
                }
                if (len2 == out_count) {
                    fprintf(stderr, "warning: audio buffer is probably too small\n");
                    swr_init(is->swr_ctx);
                }
                is->audio_buf = is->audio_buf2;
                resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
            } else {
                is->audio_buf = is->frame->data[0];
                resampled_data_size = data_size;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            /* NOTE(review): the divisor line of this expression is missing;
             * the statement is unterminated as shown. */
            is->audio_clock += (double)data_size /
#ifdef DEBUG
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return resampled_data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);
        memset(pkt_temp, 0, sizeof(*pkt_temp));

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* queue empty: nudge the read thread to refill it */
        if (is->audioq.nb_packets == 0)
            SDL_CondSignal(is->continue_read_thread);

        /* read next packet */
        if ((new_packet = packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
            return -1;

        if (pkt->data == flush_pkt.data) {
            avcodec_flush_buffers(dec);
            flush_complete = 0;
        }

        *pkt_temp = *pkt;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2218 
2219 /* prepare a new audio buffer */
2220 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2221 {
2222  VideoState *is = opaque;
2223  int audio_size, len1;
2224  int bytes_per_sec;
2226  double pts;
2227 
2229 
2230  while (len > 0) {
2231  if (is->audio_buf_index >= is->audio_buf_size) {
2232  audio_size = audio_decode_frame(is, &pts);
2233  if (audio_size < 0) {
2234  /* if error, just output silence */
2235  is->audio_buf = is->silence_buf;
2236  is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2237  } else {
2238  if (is->show_mode != SHOW_MODE_VIDEO)
2239  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2240  is->audio_buf_size = audio_size;
2241  }
2242  is->audio_buf_index = 0;
2243  }
2244  len1 = is->audio_buf_size - is->audio_buf_index;
2245  if (len1 > len)
2246  len1 = len;
2247  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2248  len -= len1;
2249  stream += len1;
2250  is->audio_buf_index += len1;
2251  }
2252  bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2254  /* Let's assume the audio driver that is used by SDL has two periods. */
2255  is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2257  if (is->audioq.serial == is->audio_pkt_temp_serial)
2259 }
2260 
2261 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2262 {
2263  SDL_AudioSpec wanted_spec, spec;
2264  const char *env;
2265  const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2266 
2267  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2268  if (env) {
2269  wanted_nb_channels = atoi(env);
2270  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2271  }
2272  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2273  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2274  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2275  }
2276  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2277  wanted_spec.freq = wanted_sample_rate;
2278  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2279  fprintf(stderr, "Invalid sample rate or channel count!\n");
2280  return -1;
2281  }
2282  wanted_spec.format = AUDIO_S16SYS;
2283  wanted_spec.silence = 0;
2284  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2285  wanted_spec.callback = sdl_audio_callback;
2286  wanted_spec.userdata = opaque;
2287  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2288  fprintf(stderr, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2289  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2290  if (!wanted_spec.channels) {
2291  fprintf(stderr, "No more channel combinations to try, audio open failed\n");
2292  return -1;
2293  }
2294  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2295  }
2296  if (spec.format != AUDIO_S16SYS) {
2297  fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
2298  return -1;
2299  }
2300  if (spec.channels != wanted_spec.channels) {
2301  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2302  if (!wanted_channel_layout) {
2303  fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
2304  return -1;
2305  }
2306  }
2307 
2308  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2309  audio_hw_params->freq = spec.freq;
2310  audio_hw_params->channel_layout = wanted_channel_layout;
2311  audio_hw_params->channels = spec.channels;
2312  return spec.size;
2313 }
2314 
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    const char *forced_codec_name = NULL;
    AVDictionary *opts;
    /* NOTE(review): the declaration of `t` (likely `AVDictionaryEntry *t;`)
       is missing from this extract; `t` is used after avcodec_open2() below.
       Confirm against the full file. */

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* default decoder for the stream's codec id */
    codec = avcodec_find_decoder(avctx->codec_id);

    /* remember the stream per media type and pick up a user-forced decoder name */
    switch (avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index; forced_codec_name = audio_codec_name; break;
        case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
        case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index; forced_codec_name = video_codec_name; break;
    }
    if (forced_codec_name)
        codec = avcodec_find_decoder_by_name(forced_codec_name);
    if (!codec) {
        if (forced_codec_name) fprintf(stderr, "No codec could be found with name '%s'\n", forced_codec_name);
        else                   fprintf(stderr, "No codec could be found with id %d\n", avctx->codec_id);
        return -1;
    }

    avctx->codec_id = codec->id;
    /* clamp user-requested lowres (reduced-resolution decoding) to what the
       decoder supports */
    avctx->lowres = lowres;
    if (avctx->lowres > codec->max_lowres) {
        av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
                codec->max_lowres);
        avctx->lowres = codec->max_lowres;
    }
    avctx->idct_algo  = idct;
    avctx->skip_frame = skip_frame;
    avctx->skip_idct  = skip_idct;
    /* NOTE(review): a couple of codec-option assignment lines appear to be
       missing from this extract around here. */

    if (avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    if (codec->capabilities & CODEC_CAP_DR1)
        avctx->flags |= CODEC_FLAG_EMU_EDGE;

    /* per-stream codec options from the command line; default threads=auto */
    opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
    if (!av_dict_get(opts, "threads", NULL, 0))
        av_dict_set(&opts, "threads", "auto", 0);
    if (avcodec_open2(avctx, codec, &opts) < 0)
        return -1;
    /* anything left in `opts` was not consumed by the decoder */
    if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        return AVERROR_OPTION_NOT_FOUND;
    }

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        int audio_hw_buf_size = audio_open(is, avctx->channel_layout, avctx->channels, avctx->sample_rate, &is->audio_src);
        if (audio_hw_buf_size < 0)
            return -1;
        is->audio_hw_buf_size = audio_hw_buf_size;
        is->audio_tgt = is->audio_src;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size  = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold
           NOTE(review): the threshold assignment this comment refers to is
           not visible in this extract. */

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
        packet_queue_start(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_start(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        /* NOTE(review): a packet_queue_start(&is->subtitleq) line appears to
           be missing from this extract. */

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2421 
/* Tear down one open stream: abort its packet queue, join its worker
   thread, and release all decoder-side state for that media type. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback before freeing the buffers it reads */
        SDL_CloseAudio();

        packet_queue_flush(&is->audioq);
        av_free_packet(&is->audio_pkt);
        swr_free(&is->swr_ctx);
        av_freep(&is->audio_buf1);
        is->audio_buf = NULL;
        avcodec_free_frame(&is->frame);

        /* free the RDFT state used by the spectrum display, if any */
        if (is->rdft) {
            av_rdft_end(is->rdft);
            av_freep(&is->rdft_data);
            is->rdft = NULL;
            is->rdft_bits = 0;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_flush(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        /* NOTE(review): a packet_queue_abort(&is->subtitleq) line appears to
           be missing from this extract. */

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        /* NOTE(review): a packet_queue_flush(&is->subtitleq) line appears to
           be missing from this extract. */
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
#if CONFIG_AVFILTER
    free_buffer_pool(&is->buffer_pool);
#endif
    /* finally, mark the stream slot as unused */
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2505 
2506 static int decode_interrupt_cb(void *ctx)
2507 {
2508  VideoState *is = ctx;
2509  return is->abort_request;
2510 }
2511 
/* NOTE(review): the signature line is missing from this extract; the body
   below takes an AVFormatContext *s and returns nonzero when the input is a
   low-latency network source — confirm the exact signature against the full
   file. */
{
    /* format-level detection: the RTP/RTSP/SDP demuxers are realtime */
    if (   !strcmp(s->iformat->name, "rtp")
        || !strcmp(s->iformat->name, "rtsp")
        || !strcmp(s->iformat->name, "sdp")
    )
        return 1;

    /* protocol-level detection: rtp:// and udp:// URLs */
    if (s->pb && (   !strncmp(s->filename, "rtp:", 4)
                  || !strncmp(s->filename, "udp:", 4)
                 )
    )
        return 1;
    return 0;
}
2527 
/* this thread gets the stream from the disk or the network */
static int read_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic = NULL;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    int eof = 0;
    int pkt_in_play_range = 0;
    /* NOTE(review): a declaration line (likely `AVDictionaryEntry *t;`) is
       missing from this extract; `t` is used below. */
    AVDictionary **opts;
    int orig_nb_streams;
    SDL_mutex *wait_mutex = SDL_CreateMutex();

    memset(st_index, -1, sizeof(st_index));
    is->last_video_stream    = is->video_stream    = -1;
    is->last_audio_stream    = is->audio_stream    = -1;
    is->last_subtitle_stream = is->subtitle_stream = -1;

    ic = avformat_alloc_context();
    /* NOTE(review): the line installing interrupt_callback.callback
       (decode_interrupt_cb) is missing from this extract. */
    ic->interrupt_callback.opaque = is;
    err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    /* NOTE(review): the enclosing `if ((t = av_dict_get(format_opts, ...)))`
       line and the `ret = AVERROR_OPTION_NOT_FOUND;` line are missing from
       this extract. */
        av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
        goto fail;
    }
    is->ic = ic;

    if (genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    /* NOTE(review): the `opts = setup_find_stream_info_opts(...)` line is
       missing from this extract; `opts` is consumed and freed below. */
    orig_nb_streams = ic->nb_streams;

    err = avformat_find_stream_info(ic, opts);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if (ic->pb)
        ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* NOTE(review): the body of this `if` (the seek_by_bytes auto-detection
       assignment) is missing from this extract, so the next statement is
       parsed as its body here. */
    if (seek_by_bytes < 0)

    is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    is->realtime = is_realtime(ic);

    /* streams start out discarded; stream_component_open() re-enables them */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    /* NOTE(review): the av_find_best_stream(...) call lines for the video,
       audio and subtitle stream selection are partially missing from this
       extract; only fragments of their argument lists remain below. */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
                               (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                st_index[AVMEDIA_TYPE_AUDIO] :
                                st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    is->show_mode = show_mode;

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret = -1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if (is->show_mode == SHOW_MODE_NONE)
        is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    if (infinite_buffer < 0 && is->realtime)
        infinite_buffer = 1;

    /* main demux loop: pull packets and feed the per-stream queues */
    for (;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return = av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
        if (is->paused &&
                (!strcmp(ic->iformat->name, "rtsp") ||
                 (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target = is->seek_pos;
            int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
// FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            } else {
                /* flush the queues and push a flush packet so the decoders
                   reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    /* NOTE(review): a packet_queue_flush(&is->subtitleq)
                       line appears to be missing from this extract. */
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
                if (is->seek_flags & AVSEEK_FLAG_BYTE) {
                    //FIXME: use a cleaner way to signal obsolete external clock...
                    /* NOTE(review): the byte-seek external-clock update line
                       is missing from this extract. */
                } else {
                    update_external_clock_pts(is, seek_target / (double)AV_TIME_BASE);
                }
            }
            is->seek_req = 0;
            eof = 0;
        }
        if (is->queue_attachments_req) {
            /* NOTE(review): the attached-picture queueing call is missing
               from this extract. */
            is->queue_attachments_req = 0;
        }

        /* if the queue are full, no need to read more */
        if (infinite_buffer<1 &&
              (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
            /* wait 10 ms */
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        }
        if (eof) {
            /* push empty (flush) packets so the decoders drain */
            if (is->video_stream >= 0) {
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            /* NOTE(review): the second half of this condition is missing
               from this extract. */
            if (is->audio_stream >= 0 &&
                av_init_packet(pkt);
                pkt->data = NULL;
                pkt->size = 0;
                pkt->stream_index = is->audio_stream;
                packet_queue_put(&is->audioq, pkt);
            }
            SDL_Delay(10);
            if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
                /* loop == 0 means loop forever; otherwise count down */
                if (loop != 1 && (!loop || --loop)) {
                    stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                } else if (autoexit) {
                    ret = AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof = 1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_LockMutex(wait_mutex);
            SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
            SDL_UnlockMutex(wait_mutex);
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
                <= ((double)duration / 1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* close each stream */
    /* NOTE(review): the three stream_component_close(is, ...) calls that
       formed the bodies of the following `if`s are missing from this
       extract, so each `if` currently governs the next statement. */
    if (is->audio_stream >= 0)
    if (is->video_stream >= 0)
    if (is->subtitle_stream >= 0)
    if (is->ic) {
        avformat_close_input(&is->ic);
    }

    if (ret != 0) {
        /* tell the main event loop to quit */
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    SDL_DestroyMutex(wait_mutex);
    return 0;
}
2810 
/* Allocate and initialize the player state for one input, then spawn the
   read (demux) thread. Returns NULL on allocation or thread failure. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond  = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond  = SDL_CreateCond();

    packet_queue_init(&is->videoq);
    packet_queue_init(&is->audioq);
    /* NOTE(review): a packet_queue_init(&is->subtitleq) line appears to be
       missing from this extract. */

    is->continue_read_thread = SDL_CreateCond();

    //FIXME: use a cleaner way to signal obsolete external clock...
    /* NOTE(review): one or two clock-initialization lines appear to be
       missing from this extract around here. */
    update_external_clock_speed(is, 1.0);
    is->audio_current_pts_drift = -av_gettime() / 1000000.0;
    is->av_sync_type = av_sync_type;
    is->read_tid     = SDL_CreateThread(read_thread, is);
    if (!is->read_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
2849 
/* NOTE(review): the signature line is missing from this extract; the body
   below reads like `static void stream_cycle_channel(VideoState *is,
   int codec_type)` — cycle to the next usable stream of the given media
   type. Confirm against the full file. */
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    int old_index;
    AVStream *st;

    /* resume scanning after the last stream of this type that was active */
    if (codec_type == AVMEDIA_TYPE_VIDEO) {
        start_index = is->last_video_stream;
        old_index = is->video_stream;
    } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
        start_index = is->last_audio_stream;
        old_index = is->audio_stream;
    } else {
        start_index = is->last_subtitle_stream;
        old_index = is->subtitle_stream;
    }
    stream_index = start_index;
    for (;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            /* subtitles may be cycled to "off" (no stream selected) */
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                is->last_subtitle_stream = -1;
                goto the_end;
            }
            if (start_index == -1)
                return;
            stream_index = 0;
        }
        /* wrapped all the way around without finding another stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, old_index);
    stream_component_open(is, stream_index);
    if (codec_type == AVMEDIA_TYPE_VIDEO)
        is->queue_attachments_req = 1;
}
2906 
2907 
/* NOTE(review): the signature line is missing from this extract; this body
   reads like `static void toggle_full_screen(VideoState *is)`. The line
   toggling the is_full_screen flag also appears to be missing before the
   video_open() call — confirm against the full file. */
{
#if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
    /* OS X needs to reallocate the SDL overlays */
    int i;
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
        is->pictq[i].reallocate = 1;
#endif
    video_open(is, 1);
}
2919 
/* UI pause toggle: flip the paused state and leave frame-step mode. */
static void toggle_pause(VideoState *is)
{
    stream_toggle_pause(is);
    is->step = 0;
}
2925 
/* NOTE(review): the signature line is missing from this extract; this body
   reads like `static void step_to_next_frame(VideoState *is)`. */
{
    /* if the stream is paused unpause it, then step */
    if (is->paused)
        stream_toggle_pause(is);
    is->step = 1;
}
2933 
/* NOTE(review): the signature line is missing from this extract; this body
   reads like `static void toggle_audio_display(VideoState *is)` — advance
   to the next show mode and clear the display area. */
{
    int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
    /* NOTE(review): the fill_rectangle(screen, ...) call that opens this
       argument list is missing from this extract. */
                   is->xleft, is->ytop, is->width, is->height,
                   bgcolor, 1);
}
2942 
/* handle an event sent by the GUI */
static void event_loop(VideoState *cur_stream)
{
    SDL_Event event;
    double incr, pos, frac;

    for (;;) {
        double x;
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit(cur_stream);
                break;
            }
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit(cur_stream);
                break;
            case SDLK_f:
                toggle_full_screen(cur_stream);
                cur_stream->force_refresh = 1;
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause(cur_stream);
                break;
            case SDLK_s: // S: Step to next frame
                step_to_next_frame(cur_stream);
                break;
            case SDLK_a:
                /* NOTE(review): the stream_cycle_channel(cur_stream,
                   AVMEDIA_TYPE_AUDIO) call is missing from this extract. */
                break;
            case SDLK_v:
                /* NOTE(review): the video channel cycle call is missing
                   from this extract. */
                break;
            case SDLK_t:
                /* NOTE(review): the subtitle channel cycle call is missing
                   from this extract. */
                break;
            case SDLK_w:
                toggle_audio_display(cur_stream);
                cur_stream->force_refresh = 1;
                break;
            case SDLK_PAGEUP:
                incr = 600.0;
                goto do_seek;
            case SDLK_PAGEDOWN:
                incr = -600.0;
                goto do_seek;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                /* byte seeking for streams without reliable timestamps,
                   time seeking otherwise */
                if (seek_by_bytes) {
                    if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
                        pos = cur_stream->video_current_pos;
                    } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
                        pos = cur_stream->audio_pkt.pos;
                    } else
                        pos = avio_tell(cur_stream->ic->pb);
                    /* convert the time increment to a byte increment */
                    if (cur_stream->ic->bit_rate)
                        incr *= cur_stream->ic->bit_rate / 8.0;
                    else
                        incr *= 180000.0;
                    pos += incr;
                    stream_seek(cur_stream, pos, incr, 1);
                } else {
                    pos = get_master_clock(cur_stream);
                    pos += incr;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
                        pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
                    stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                }
                break;
            default:
                break;
            }
            break;
        case SDL_VIDEOEXPOSE:
            cur_stream->force_refresh = 1;
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit(cur_stream);
                break;
            }
            /* falls through: a click is handled like a motion for seeking */
        case SDL_MOUSEMOTION:
            if (cursor_hidden) {
                SDL_ShowCursor(1);
                cursor_hidden = 0;
            }
            /* NOTE(review): a line (likely refreshing the last cursor-shown
               timestamp) is missing from this extract. */
            if (event.type == SDL_MOUSEBUTTONDOWN) {
                x = event.button.x;
            } else {
                if (event.motion.state != SDL_PRESSED)
                    break;
                x = event.motion.x;
            }
            /* seek proportionally to the horizontal click position */
            if (seek_by_bytes || cur_stream->ic->duration <= 0) {
                uint64_t size = avio_size(cur_stream->ic->pb);
                stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
            } else {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns  = cur_stream->ic->duration / 1000000LL;
                thh  = tns / 3600;
                tmm  = (tns % 3600) / 60;
                tss  = (tns % 60);
                frac = x / cur_stream->width;
                ns   = frac * tns;
                hh   = ns / 3600;
                mm   = (ns % 3600) / 60;
                ss   = (ns % 60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac * cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                      SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
            screen_width  = cur_stream->width  = event.resize.w;
            screen_height = cur_stream->height = event.resize.h;
            cur_stream->force_refresh = 1;
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit(cur_stream);
            break;
        case FF_ALLOC_EVENT:
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* NOTE(review): the `if (...) {` line opening this cursor-hide
               block is missing from this extract; the closing brace below
               is currently unmatched. */
                SDL_ShowCursor(0);
                cursor_hidden = 1;
            }
            video_refresh(event.user.data1);
            cur_stream->refresh = 0;
            break;
        default:
            break;
        }
    }
}
3102 
3103 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3104 {
3105  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3106  return opt_default(NULL, "video_size", arg);
3107 }
3108 
/* "-x" option handler: force the displayed window width in pixels. */
static int opt_width(void *optctx, const char *opt, const char *arg)
{
    /* parse_number_or_die() exits the process on invalid input */
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
3114 
/* "-y" option handler: force the displayed window height in pixels. */
static int opt_height(void *optctx, const char *opt, const char *arg)
{
    /* parse_number_or_die() exits the process on invalid input */
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
3120 
3121 static int opt_format(void *optctx, const char *opt, const char *arg)
3122 {
3123  file_iformat = av_find_input_format(arg);
3124  if (!file_iformat) {
3125  fprintf(stderr, "Unknown input format: %s\n", arg);
3126  return AVERROR(EINVAL);
3127  }
3128  return 0;
3129 }
3130 
3131 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3132 {
3133  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3134  return opt_default(NULL, "pixel_format", arg);
3135 }
3136 
/* "-sync" option handler: select the master clock (audio/video/external).
   NOTE(review): the assignments to av_sync_type in the first three branches
   are missing from this extract, leaving the `if`/`else if` bodies empty —
   confirm against the full file. */
static int opt_sync(void *optctx, const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
    else if (!strcmp(arg, "video"))
    else if (!strcmp(arg, "ext"))
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}
3151 
3152 static int opt_seek(void *optctx, const char *opt, const char *arg)
3153 {
3154  start_time = parse_time_or_die(opt, arg, 1);
3155  return 0;
3156 }
3157 
3158 static int opt_duration(void *optctx, const char *opt, const char *arg)
3159 {
3160  duration = parse_time_or_die(opt, arg, 1);
3161  return 0;
3162 }
3163 
3164 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3165 {
3166  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3167  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3168  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3169  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3170  return 0;
3171 }
3172 
3173 static void opt_input_file(void *optctx, const char *filename)
3174 {
3175  if (input_filename) {
3176  fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3177  filename, input_filename);
3178  exit(1);
3179  }
3180  if (!strcmp(filename, "-"))
3181  filename = "pipe:";
3182  input_filename = filename;
3183 }
3184 
3185 static int opt_codec(void *optctx, const char *opt, const char *arg)
3186 {
3187  const char *spec = strchr(opt, ':');
3188  if (!spec) {
3189  fprintf(stderr, "No media specifier was specified in '%s' in option '%s'\n",
3190  arg, opt);
3191  return AVERROR(EINVAL);
3192  }
3193  spec++;
3194  switch (spec[0]) {
3195  case 'a' : audio_codec_name = arg; break;
3196  case 's' : subtitle_codec_name = arg; break;
3197  case 'v' : video_codec_name = arg; break;
3198  default:
3199  fprintf(stderr, "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3200  return AVERROR(EINVAL);
3201  }
3202  return 0;
3203 }
3204 
/* dummy target for the no-op "-i" option below */
static int dummy;

/* Command-line option table, consumed by the cmdutils option parser.
   Common options (loglevel, help, ...) come from cmdutils_common_opts.h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
    { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
    { "vn", OPT_BOOL, { &video_disable }, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
    { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo", "algo" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
    { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
    { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
    { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
    { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
    { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
    { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
    { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
    { NULL, },
};
3256 
/* Print the one-line usage synopsis via the logging callback. */
static void show_usage(void)
{
    av_log(NULL, AV_LOG_INFO, "Simple media player\n");
    av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
    av_log(NULL, AV_LOG_INFO, "\n");
}
3263 
/* cmdutils help callback: print usage, the option tables and the runtime
   key bindings.
   NOTE(review): several lines are missing from this extract (before
   show_usage() and inside the CONFIG_AVFILTER conditional), leaving the
   #if/#else branches empty here — confirm against the full file. */
void show_help_default(const char *opt, const char *arg)
{
    show_usage();
    show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
    show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
    printf("\n");
#if !CONFIG_AVFILTER
#else
#endif
    printf("\nWhile playing:\n"
           "q, ESC quit\n"
           "f toggle full screen\n"
           "p, SPC pause\n"
           "a cycle audio channel\n"
           "v cycle video channel\n"
           "t cycle subtitle channel\n"
           "w show audio waves\n"
           "s activate frame-step mode\n"
           "left/right seek backward/forward 10 seconds\n"
           "down/up seek backward/forward 1 minute\n"
           "page down/page up seek backward/forward 10 minutes\n"
           "mouse click seek to percentage in file corresponding to fraction of width\n"
           );
}
3293 
3294 static int lockmgr(void **mtx, enum AVLockOp op)
3295 {
3296  switch(op) {
3297  case AV_LOCK_CREATE:
3298  *mtx = SDL_CreateMutex();
3299  if(!*mtx)
3300  return 1;
3301  return 0;
3302  case AV_LOCK_OBTAIN:
3303  return !!SDL_LockMutex(*mtx);
3304  case AV_LOCK_RELEASE:
3305  return !!SDL_UnlockMutex(*mtx);
3306  case AV_LOCK_DESTROY:
3307  SDL_DestroyMutex(*mtx);
3308  return 0;
3309  }
3310  return 1;
3311 }
3312 
3313 /* Called from the main */
3314 int main(int argc, char **argv)
3315 {
3316  int flags;
3317  VideoState *is;
3318  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3319 
3321  parse_loglevel(argc, argv, options);
3322 
3323  /* register all codecs, demux and protocols */
3325 #if CONFIG_AVDEVICE
3327 #endif
3328 #if CONFIG_AVFILTER
3330 #endif
3331  av_register_all();
3333 
3334  init_opts();
3335 
3336  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3337  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3338 
3339  show_banner(argc, argv, options);
3340 
3341  parse_options(NULL, argc, argv, options, opt_input_file);
3342 
3343  if (!input_filename) {
3344  show_usage();
3345  fprintf(stderr, "An input file must be specified\n");
3346  fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3347  exit(1);
3348  }
3349 
3350  if (display_disable) {
3351  video_disable = 1;
3352  }
3353  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3354  if (audio_disable)
3355  flags &= ~SDL_INIT_AUDIO;
3356  if (display_disable)
3357  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3358 #if !defined(__MINGW32__) && !defined(__APPLE__)
3359  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3360 #endif
3361  if (SDL_Init (flags)) {
3362  fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3363  fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3364  exit(1);
3365  }
3366 
3367  if (!display_disable) {
3368 #if HAVE_SDL_VIDEO_SIZE
3369  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3370  fs_screen_width = vi->current_w;
3371  fs_screen_height = vi->current_h;
3372 #endif
3373  }
3374 
3375  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3376  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3377  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3378 
3380  fprintf(stderr, "Could not initialize lock manager!\n");
3381  do_exit(NULL);
3382  }
3383 
3384  av_init_packet(&flush_pkt);
3385  flush_pkt.data = (char *)(intptr_t)"FLUSH";
3386 
3387  is = stream_open(input_filename, file_iformat);
3388  if (!is) {
3389  fprintf(stderr, "Failed to initialize VideoState!\n");
3390  do_exit(NULL);
3391  }
3392 
3393  event_loop(is);
3394 
3395  /* never returns */
3396 
3397  return 0;
3398 }