ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include "libavutil/avstring.h"
32 #include "libavutil/colorspace.h"
33 #include "libavutil/mathematics.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/imgutils.h"
36 #include "libavutil/dict.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/avassert.h"
40 #include "libavutil/time.h"
41 #include "libavformat/avformat.h"
42 #include "libavdevice/avdevice.h"
43 #include "libswscale/swscale.h"
44 #include "libavutil/opt.h"
45 #include "libavcodec/avfft.h"
46 #include "libswresample/swresample.h"
47 
48 #if CONFIG_AVFILTER
49 # include "libavfilter/avcodec.h"
50 # include "libavfilter/avfilter.h"
51 # include "libavfilter/buffersink.h"
52 # include "libavfilter/buffersrc.h"
53 #endif
54 
55 #include <SDL.h>
56 #include <SDL_thread.h>
57 
58 #include "cmdutils.h"
59 
60 #include <assert.h>
61 
62 const char program_name[] = "ffplay";
63 const int program_birth_year = 2003;
64 
65 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66 #define MIN_FRAMES 5
67 
68 /* SDL audio buffer size, in samples. Should be small to have precise
69  A/V sync, as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71 
72 /* no AV sync correction is done if below the minimum AV sync threshold */
73 #define AV_SYNC_THRESHOLD_MIN 0.01
74 /* AV sync correction is done if above the maximum AV sync threshold */
75 #define AV_SYNC_THRESHOLD_MAX 0.1
76 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
77 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
78 /* no AV correction is done if too big error */
79 #define AV_NOSYNC_THRESHOLD 10.0
80 
81 /* maximum audio speed change to get correct sync */
82 #define SAMPLE_CORRECTION_PERCENT_MAX 10
83 
84 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
85 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
86 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
87 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
88 
89 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
90 #define AUDIO_DIFF_AVG_NB 20
91 
92 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
93 #define REFRESH_RATE 0.01
94 
95 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
96 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
97 #define SAMPLE_ARRAY_SIZE (8 * 65536)
98 
99 #define CURSOR_HIDE_DELAY 1000000
100 
101 static int64_t sws_flags = SWS_BICUBIC;
102 
103 typedef struct MyAVPacketList {
106  int serial;
108 
109 typedef struct PacketQueue {
110  MyAVPacketList *first_pkt, *last_pkt;
111  int nb_packets;
112  int size;
113  int abort_request;
114  int serial;
115  SDL_mutex *mutex;
116  SDL_cond *cond;
117 } PacketQueue;
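/* Note on the serial fields above: packet_queue_put_private() increments
 * q->serial each time the special flush_pkt is queued (e.g. after a seek),
 * and every queued packet records the serial it was added under. Consumers
 * compare a packet's or frame's serial with the queue's current serial to
 * detect and drop data that predates the last flush. */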
118 
119 #define VIDEO_PICTURE_QUEUE_SIZE 3
120 #define SUBPICTURE_QUEUE_SIZE 4
121 
122 typedef struct VideoPicture {
123  double pts; // presentation timestamp for this picture
124  int64_t pos; // byte position in file
125  SDL_Overlay *bmp;
126  int width, height; /* source height & width */
127  int allocated;
128  int reallocate;
129  int serial;
130 
131  AVRational sar;
132 } VideoPicture;
133 
134 typedef struct SubPicture {
135  double pts; /* presentation time stamp for this picture */
136  AVSubtitle sub;
137  int serial;
138 } SubPicture;
139 
140 typedef struct AudioParams {
141  int freq;
142  int channels;
143  int64_t channel_layout;
144  enum AVSampleFormat fmt;
145 } AudioParams;
146 
147 typedef struct Clock {
148  double pts; /* clock base */
149  double pts_drift; /* clock base minus time at which we updated the clock */
150  double last_updated;
151  double speed;
152  int serial; /* clock is based on a packet with this serial */
153  int paused;
154  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
155 } Clock;
156 
157 enum {
158  AV_SYNC_AUDIO_MASTER, /* default choice */
159  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
160  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
161 };
162 
163 typedef struct VideoState {
164  SDL_Thread *read_tid;
165  SDL_Thread *video_tid;
170  int paused;
173  int seek_req;
175  int64_t seek_pos;
176  int64_t seek_rel;
179  int realtime;
182 
186 
188 
190 
191  double audio_clock;
193  double audio_diff_cum; /* used for AV difference average computation */
203  unsigned int audio_buf_size; /* in bytes */
204  unsigned int audio_buf1_size;
205  int audio_buf_index; /* in bytes */
213 #if CONFIG_AVFILTER
214  struct AudioParams audio_filter_src;
215 #endif
222 
223  enum ShowMode {
224  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
225  } show_mode;
232  int xpos;
234 
235  SDL_Thread *subtitle_tid;
241  SDL_mutex *subpq_mutex;
242  SDL_cond *subpq_cond;
243 
244  double frame_timer;
255  int64_t video_current_pos; // current displayed file pos
256  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
259  SDL_mutex *pictq_mutex;
260  SDL_cond *pictq_cond;
261 #if !CONFIG_AVFILTER
263 #endif
265 
266  char filename[1024];
268  int step;
269 
270 #if CONFIG_AVFILTER
271  AVFilterContext *in_video_filter; // the first filter in the video chain
272  AVFilterContext *out_video_filter; // the last filter in the video chain
273  AVFilterContext *in_audio_filter; // the first filter in the audio chain
274  AVFilterContext *out_audio_filter; // the last filter in the audio chain
275  AVFilterGraph *agraph; // audio filter graph
276 #endif
277 
279 
281 } VideoState;
282 
283 /* options specified by the user */
285 static const char *input_filename;
286 static const char *window_title;
287 static int fs_screen_width;
288 static int fs_screen_height;
289 static int default_width = 640;
290 static int default_height = 480;
291 static int screen_width = 0;
292 static int screen_height = 0;
293 static int audio_disable;
294 static int video_disable;
295 static int subtitle_disable;
296 static int wanted_stream[AVMEDIA_TYPE_NB] = {
297  [AVMEDIA_TYPE_AUDIO] = -1,
298  [AVMEDIA_TYPE_VIDEO] = -1,
299  [AVMEDIA_TYPE_SUBTITLE] = -1,
300 };
301 static int seek_by_bytes = -1;
302 static int display_disable;
303 static int show_status = 1;
304 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
305 static int64_t start_time = AV_NOPTS_VALUE;
306 static int64_t duration = AV_NOPTS_VALUE;
307 static int workaround_bugs = 1;
308 static int fast = 0;
309 static int genpts = 0;
310 static int lowres = 0;
311 static int error_concealment = 3;
312 static int decoder_reorder_pts = -1;
313 static int autoexit;
314 static int exit_on_keydown;
315 static int exit_on_mousedown;
316 static int loop = 1;
317 static int framedrop = -1;
318 static int infinite_buffer = -1;
319 static enum ShowMode show_mode = SHOW_MODE_NONE;
320 static const char *audio_codec_name;
321 static const char *subtitle_codec_name;
322 static const char *video_codec_name;
323 double rdftspeed = 0.02;
324 static int64_t cursor_last_shown;
325 static int cursor_hidden = 0;
326 #if CONFIG_AVFILTER
327 static char *vfilters = NULL;
328 static char *afilters = NULL;
329 #endif
330 
331 /* current context */
332 static int is_full_screen;
333 static int64_t audio_callback_time;
334 
335 static AVPacket flush_pkt;
336 
337 #define FF_ALLOC_EVENT (SDL_USEREVENT)
338 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
339 
340 static SDL_Surface *screen;
341 
342 static inline
343 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
344  enum AVSampleFormat fmt2, int64_t channel_count2)
345 {
346  /* If channel count == 1, planar and non-planar formats are the same */
347  if (channel_count1 == 1 && channel_count2 == 1)
348  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
349  else
350  return channel_count1 != channel_count2 || fmt1 != fmt2;
351 }
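/* Note: with a single channel every frame holds exactly one sample, so planar
 * and packed layouts are byte-identical and only the packed base format has to
 * match; with more channels both the channel count and the exact sample format
 * (including planar vs. packed) must match for the buffers to be compatible
 * without conversion. */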
352 
353 static inline
354 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
355 {
356  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
357  return channel_layout;
358  else
359  return 0;
360 }
361 
362 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
363 
364 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
365 {
366  MyAVPacketList *pkt1;
367 
368  if (q->abort_request)
369  return -1;
370 
371  pkt1 = av_malloc(sizeof(MyAVPacketList));
372  if (!pkt1)
373  return -1;
374  pkt1->pkt = *pkt;
375  pkt1->next = NULL;
376  if (pkt == &flush_pkt)
377  q->serial++;
378  pkt1->serial = q->serial;
379 
380  if (!q->last_pkt)
381  q->first_pkt = pkt1;
382  else
383  q->last_pkt->next = pkt1;
384  q->last_pkt = pkt1;
385  q->nb_packets++;
386  q->size += pkt1->pkt.size + sizeof(*pkt1);
387  /* XXX: should duplicate packet data in DV case */
388  SDL_CondSignal(q->cond);
389  return 0;
390 }
391 
392 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
393 {
394  int ret;
395 
396  /* duplicate the packet */
397  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
398  return -1;
399 
400  SDL_LockMutex(q->mutex);
401  ret = packet_queue_put_private(q, pkt);
402  SDL_UnlockMutex(q->mutex);
403 
404  if (pkt != &flush_pkt && ret < 0)
405  av_free_packet(pkt);
406 
407  return ret;
408 }
409 
410 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
411 {
412  AVPacket pkt1, *pkt = &pkt1;
413  av_init_packet(pkt);
414  pkt->data = NULL;
415  pkt->size = 0;
416  pkt->stream_index = stream_index;
417  return packet_queue_put(q, pkt);
418 }
419 
420 /* packet queue handling */
421 static void packet_queue_init(PacketQueue *q)
422 {
423  memset(q, 0, sizeof(PacketQueue));
424  q->mutex = SDL_CreateMutex();
425  q->cond = SDL_CreateCond();
426  q->abort_request = 1;
427 }
428 
429 static void packet_queue_flush(PacketQueue *q)
430 {
431  MyAVPacketList *pkt, *pkt1;
432 
433  SDL_LockMutex(q->mutex);
434  for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
435  pkt1 = pkt->next;
436  av_free_packet(&pkt->pkt);
437  av_freep(&pkt);
438  }
439  q->last_pkt = NULL;
440  q->first_pkt = NULL;
441  q->nb_packets = 0;
442  q->size = 0;
443  SDL_UnlockMutex(q->mutex);
444 }
445 
446 static void packet_queue_destroy(PacketQueue *q)
447 {
448  packet_queue_flush(q);
449  SDL_DestroyMutex(q->mutex);
450  SDL_DestroyCond(q->cond);
451 }
452 
453 static void packet_queue_abort(PacketQueue *q)
454 {
455  SDL_LockMutex(q->mutex);
456 
457  q->abort_request = 1;
458 
459  SDL_CondSignal(q->cond);
460 
461  SDL_UnlockMutex(q->mutex);
462 }
463 
464 static void packet_queue_start(PacketQueue *q)
465 {
466  SDL_LockMutex(q->mutex);
467  q->abort_request = 0;
468  packet_queue_put_private(q, &flush_pkt);
469  SDL_UnlockMutex(q->mutex);
470 }
471 
472 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
473 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
474 {
475  MyAVPacketList *pkt1;
476  int ret;
477 
478  SDL_LockMutex(q->mutex);
479 
480  for (;;) {
481  if (q->abort_request) {
482  ret = -1;
483  break;
484  }
485 
486  pkt1 = q->first_pkt;
487  if (pkt1) {
488  q->first_pkt = pkt1->next;
489  if (!q->first_pkt)
490  q->last_pkt = NULL;
491  q->nb_packets--;
492  q->size -= pkt1->pkt.size + sizeof(*pkt1);
493  *pkt = pkt1->pkt;
494  if (serial)
495  *serial = pkt1->serial;
496  av_free(pkt1);
497  ret = 1;
498  break;
499  } else if (!block) {
500  ret = 0;
501  break;
502  } else {
503  SDL_CondWait(q->cond, q->mutex);
504  }
505  }
506  SDL_UnlockMutex(q->mutex);
507  return ret;
508 }
509 
510 static inline void fill_rectangle(SDL_Surface *screen,
511  int x, int y, int w, int h, int color, int update)
512 {
513  SDL_Rect rect;
514  rect.x = x;
515  rect.y = y;
516  rect.w = w;
517  rect.h = h;
518  SDL_FillRect(screen, &rect, color);
519  if (update && w > 0 && h > 0)
520  SDL_UpdateRect(screen, x, y, w, h);
521 }
522 
523 /* draw only the border of a rectangle */
524 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
525 {
526  int w1, w2, h1, h2;
527 
528  /* fill the background */
529  w1 = x;
530  if (w1 < 0)
531  w1 = 0;
532  w2 = width - (x + w);
533  if (w2 < 0)
534  w2 = 0;
535  h1 = y;
536  if (h1 < 0)
537  h1 = 0;
538  h2 = height - (y + h);
539  if (h2 < 0)
540  h2 = 0;
541  fill_rectangle(screen,
542  xleft, ytop,
543  w1, height,
544  color, update);
545  fill_rectangle(screen,
546  xleft + width - w2, ytop,
547  w2, height,
548  color, update);
549  fill_rectangle(screen,
550  xleft + w1, ytop,
551  width - w1 - w2, h1,
552  color, update);
553  fill_rectangle(screen,
554  xleft + w1, ytop + height - h2,
555  width - w1 - w2, h2,
556  color, update);
557 }
558 
559 #define ALPHA_BLEND(a, oldp, newp, s)\
560 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
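/* ALPHA_BLEND computes (oldp*(255 - a) + newp*a) / 255, i.e. a standard alpha
 * blend; the shift s is used when oldp already holds the sum of 2^s source
 * samples (s=1 for two chroma samples per 2x1 block, s=2 for four per 2x2
 * block). Example with s=0: a=128, oldp=100, newp=200 gives
 * (100*127 + 200*128) / 255 = 150, roughly the midpoint as expected. */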
561 
562 #define RGBA_IN(r, g, b, a, s)\
563 {\
564  unsigned int v = ((const uint32_t *)(s))[0];\
565  a = (v >> 24) & 0xff;\
566  r = (v >> 16) & 0xff;\
567  g = (v >> 8) & 0xff;\
568  b = v & 0xff;\
569 }
570 
571 #define YUVA_IN(y, u, v, a, s, pal)\
572 {\
573  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
574  a = (val >> 24) & 0xff;\
575  y = (val >> 16) & 0xff;\
576  u = (val >> 8) & 0xff;\
577  v = val & 0xff;\
578 }
579 
580 #define YUVA_OUT(d, y, u, v, a)\
581 {\
582  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
583 }
584 
585 
586 #define BPP 1
587 
588 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
589 {
590  int wrap, wrap3, width2, skip2;
591  int y, u, v, a, u1, v1, a1, w, h;
592  uint8_t *lum, *cb, *cr;
593  const uint8_t *p;
594  const uint32_t *pal;
595  int dstx, dsty, dstw, dsth;
596 
597  dstw = av_clip(rect->w, 0, imgw);
598  dsth = av_clip(rect->h, 0, imgh);
599  dstx = av_clip(rect->x, 0, imgw - dstw);
600  dsty = av_clip(rect->y, 0, imgh - dsth);
601  lum = dst->data[0] + dsty * dst->linesize[0];
602  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
603  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
604 
605  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
606  skip2 = dstx >> 1;
607  wrap = dst->linesize[0];
608  wrap3 = rect->pict.linesize[0];
609  p = rect->pict.data[0];
610  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
611 
612  if (dsty & 1) {
613  lum += dstx;
614  cb += skip2;
615  cr += skip2;
616 
617  if (dstx & 1) {
618  YUVA_IN(y, u, v, a, p, pal);
619  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
621  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
622  cb++;
623  cr++;
624  lum++;
625  p += BPP;
626  }
627  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
628  YUVA_IN(y, u, v, a, p, pal);
629  u1 = u;
630  v1 = v;
631  a1 = a;
632  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
633 
634  YUVA_IN(y, u, v, a, p + BPP, pal);
635  u1 += u;
636  v1 += v;
637  a1 += a;
638  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
639  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
640  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
641  cb++;
642  cr++;
643  p += 2 * BPP;
644  lum += 2;
645  }
646  if (w) {
647  YUVA_IN(y, u, v, a, p, pal);
648  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
649  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
650  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
651  p++;
652  lum++;
653  }
654  p += wrap3 - dstw * BPP;
655  lum += wrap - dstw - dstx;
656  cb += dst->linesize[1] - width2 - skip2;
657  cr += dst->linesize[2] - width2 - skip2;
658  }
659  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
660  lum += dstx;
661  cb += skip2;
662  cr += skip2;
663 
664  if (dstx & 1) {
665  YUVA_IN(y, u, v, a, p, pal);
666  u1 = u;
667  v1 = v;
668  a1 = a;
669  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670  p += wrap3;
671  lum += wrap;
672  YUVA_IN(y, u, v, a, p, pal);
673  u1 += u;
674  v1 += v;
675  a1 += a;
676  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
677  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
678  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
679  cb++;
680  cr++;
681  p += -wrap3 + BPP;
682  lum += -wrap + 1;
683  }
684  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
685  YUVA_IN(y, u, v, a, p, pal);
686  u1 = u;
687  v1 = v;
688  a1 = a;
689  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
690 
691  YUVA_IN(y, u, v, a, p + BPP, pal);
692  u1 += u;
693  v1 += v;
694  a1 += a;
695  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
696  p += wrap3;
697  lum += wrap;
698 
699  YUVA_IN(y, u, v, a, p, pal);
700  u1 += u;
701  v1 += v;
702  a1 += a;
703  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
704 
705  YUVA_IN(y, u, v, a, p + BPP, pal);
706  u1 += u;
707  v1 += v;
708  a1 += a;
709  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
710 
711  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
712  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
713 
714  cb++;
715  cr++;
716  p += -wrap3 + 2 * BPP;
717  lum += -wrap + 2;
718  }
719  if (w) {
720  YUVA_IN(y, u, v, a, p, pal);
721  u1 = u;
722  v1 = v;
723  a1 = a;
724  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
725  p += wrap3;
726  lum += wrap;
727  YUVA_IN(y, u, v, a, p, pal);
728  u1 += u;
729  v1 += v;
730  a1 += a;
731  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
732  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
733  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
734  cb++;
735  cr++;
736  p += -wrap3 + BPP;
737  lum += -wrap + 1;
738  }
739  p += wrap3 + (wrap3 - dstw * BPP);
740  lum += wrap + (wrap - dstw - dstx);
741  cb += dst->linesize[1] - width2 - skip2;
742  cr += dst->linesize[2] - width2 - skip2;
743  }
744  /* handle odd height */
745  if (h) {
746  lum += dstx;
747  cb += skip2;
748  cr += skip2;
749 
750  if (dstx & 1) {
751  YUVA_IN(y, u, v, a, p, pal);
752  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
753  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
754  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
755  cb++;
756  cr++;
757  lum++;
758  p += BPP;
759  }
760  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
761  YUVA_IN(y, u, v, a, p, pal);
762  u1 = u;
763  v1 = v;
764  a1 = a;
765  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
766 
767  YUVA_IN(y, u, v, a, p + BPP, pal);
768  u1 += u;
769  v1 += v;
770  a1 += a;
771  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
772  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
773  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
774  cb++;
775  cr++;
776  p += 2 * BPP;
777  lum += 2;
778  }
779  if (w) {
780  YUVA_IN(y, u, v, a, p, pal);
781  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
782  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
783  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
784  }
785  }
786 }
787 
788 static void free_picture(VideoPicture *vp)
789 {
790  if (vp->bmp) {
791  SDL_FreeYUVOverlay(vp->bmp);
792  vp->bmp = NULL;
793  }
794 }
795 
796 static void free_subpicture(SubPicture *sp)
797 {
798  avsubtitle_free(&sp->sub);
799 }
800 
801 static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, VideoPicture *vp)
802 {
803  float aspect_ratio;
804  int width, height, x, y;
805 
806  if (vp->sar.num == 0)
807  aspect_ratio = 0;
808  else
809  aspect_ratio = av_q2d(vp->sar);
810 
811  if (aspect_ratio <= 0.0)
812  aspect_ratio = 1.0;
813  aspect_ratio *= (float)vp->width / (float)vp->height;
814 
815  /* XXX: we suppose the screen has a 1.0 pixel ratio */
816  height = scr_height;
817  width = ((int)rint(height * aspect_ratio)) & ~1;
818  if (width > scr_width) {
819  width = scr_width;
820  height = ((int)rint(width / aspect_ratio)) & ~1;
821  }
822  x = (scr_width - width) / 2;
823  y = (scr_height - height) / 2;
824  rect->x = scr_xleft + x;
825  rect->y = scr_ytop + y;
826  rect->w = FFMAX(width, 1);
827  rect->h = FFMAX(height, 1);
828 }
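/* Example: a 1920x1080 picture with a 1:1 sample aspect ratio shown in a
 * 1280x960 area. aspect_ratio = 1920/1080 = 16/9; trying the full height
 * first gives width = rint(960 * 16/9) & ~1 = 1706, which exceeds 1280, so
 * the width is clamped to 1280 and the height becomes rint(1280 * 9/16) & ~1
 * = 720. The resulting 1280x720 rectangle is centered, leaving 120-pixel
 * bands at the top and bottom. */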
829 
830 static void video_image_display(VideoState *is)
831 {
832  VideoPicture *vp;
833  SubPicture *sp;
834  AVPicture pict;
835  SDL_Rect rect;
836  int i;
837 
838  vp = &is->pictq[is->pictq_rindex];
839  if (vp->bmp) {
840  if (is->subtitle_st) {
841  if (is->subpq_size > 0) {
842  sp = &is->subpq[is->subpq_rindex];
843 
844  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
845  SDL_LockYUVOverlay (vp->bmp);
846 
847  pict.data[0] = vp->bmp->pixels[0];
848  pict.data[1] = vp->bmp->pixels[2];
849  pict.data[2] = vp->bmp->pixels[1];
850 
851  pict.linesize[0] = vp->bmp->pitches[0];
852  pict.linesize[1] = vp->bmp->pitches[2];
853  pict.linesize[2] = vp->bmp->pitches[1];
854 
855  for (i = 0; i < sp->sub.num_rects; i++)
856  blend_subrect(&pict, sp->sub.rects[i],
857  vp->bmp->w, vp->bmp->h);
858 
859  SDL_UnlockYUVOverlay (vp->bmp);
860  }
861  }
862  }
863 
864  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp);
865 
866  SDL_DisplayYUVOverlay(vp->bmp, &rect);
867 
868  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
869  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
870  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
871  is->last_display_rect = rect;
872  }
873  }
874 }
875 
876 static inline int compute_mod(int a, int b)
877 {
878  return a < 0 ? a%b + b : a%b;
879 }
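/* compute_mod() wraps a possibly negative index back into the buffer: e.g.
 * compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3, whereas
 * the C '%' operator would return -3. video_audio_display() relies on this
 * when it steps backwards through the circular sample_array. */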
880 
881 static void video_audio_display(VideoState *s)
882 {
883  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
884  int ch, channels, h, h2, bgcolor, fgcolor;
885  int64_t time_diff;
886  int rdft_bits, nb_freq;
887 
888  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
889  ;
890  nb_freq = 1 << (rdft_bits - 1);
891 
892  /* compute display index : center on currently output samples */
893  channels = s->audio_tgt.channels;
894  nb_display_channels = channels;
895  if (!s->paused) {
896  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
897  n = 2 * channels;
898  delay = s->audio_write_buf_size;
899  delay /= n;
900 
901  /* to be more precise, we take into account the time spent since
902  the last buffer computation */
903  if (audio_callback_time) {
904  time_diff = av_gettime() - audio_callback_time;
905  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
906  }
907 
908  delay += 2 * data_used;
909  if (delay < data_used)
910  delay = data_used;
911 
912  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
913  if (s->show_mode == SHOW_MODE_WAVES) {
914  h = INT_MIN;
915  for (i = 0; i < 1000; i += channels) {
916  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
917  int a = s->sample_array[idx];
918  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
919  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
920  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
921  int score = a - d;
922  if (h < score && (b ^ c) < 0) {
923  h = score;
924  i_start = idx;
925  }
926  }
927  }
928 
929  s->last_i_start = i_start;
930  } else {
931  i_start = s->last_i_start;
932  }
933 
934  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
935  if (s->show_mode == SHOW_MODE_WAVES) {
936  fill_rectangle(screen,
937  s->xleft, s->ytop, s->width, s->height,
938  bgcolor, 0);
939 
940  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
941 
942  /* total height for one channel */
943  h = s->height / nb_display_channels;
944  /* graph height / 2 */
945  h2 = (h * 9) / 20;
946  for (ch = 0; ch < nb_display_channels; ch++) {
947  i = i_start + ch;
948  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
949  for (x = 0; x < s->width; x++) {
950  y = (s->sample_array[i] * h2) >> 15;
951  if (y < 0) {
952  y = -y;
953  ys = y1 - y;
954  } else {
955  ys = y1;
956  }
957  fill_rectangle(screen,
958  s->xleft + x, ys, 1, y,
959  fgcolor, 0);
960  i += channels;
961  if (i >= SAMPLE_ARRAY_SIZE)
962  i -= SAMPLE_ARRAY_SIZE;
963  }
964  }
965 
966  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
967 
968  for (ch = 1; ch < nb_display_channels; ch++) {
969  y = s->ytop + ch * h;
970  fill_rectangle(screen,
971  s->xleft, y, s->width, 1,
972  fgcolor, 0);
973  }
974  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
975  } else {
976  nb_display_channels= FFMIN(nb_display_channels, 2);
977  if (rdft_bits != s->rdft_bits) {
978  av_rdft_end(s->rdft);
979  av_free(s->rdft_data);
980  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
981  s->rdft_bits = rdft_bits;
982  s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
983  }
984  {
985  FFTSample *data[2];
986  for (ch = 0; ch < nb_display_channels; ch++) {
987  data[ch] = s->rdft_data + 2 * nb_freq * ch;
988  i = i_start + ch;
989  for (x = 0; x < 2 * nb_freq; x++) {
990  double w = (x-nb_freq) * (1.0 / nb_freq);
991  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
992  i += channels;
993  if (i >= SAMPLE_ARRAY_SIZE)
994  i -= SAMPLE_ARRAY_SIZE;
995  }
996  av_rdft_calc(s->rdft, data[ch]);
997  }
998  /* Least efficient way to do this, we should of course
999  * directly access it but it is more than fast enough. */
1000  for (y = 0; y < s->height; y++) {
1001  double w = 1 / sqrt(nb_freq);
1002  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1003  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1004  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1005  a = FFMIN(a, 255);
1006  b = FFMIN(b, 255);
1007  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1008 
1009  fill_rectangle(screen,
1010  s->xpos, s->height-y, 1, 1,
1011  fgcolor, 0);
1012  }
1013  }
1014  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1015  if (!s->paused)
1016  s->xpos++;
1017  if (s->xpos >= s->width)
1018  s->xpos= s->xleft;
1019  }
1020 }
1021 
1022 static void stream_close(VideoState *is)
1023 {
1024  int i;
1025  /* XXX: use a special url_shutdown call to abort parse cleanly */
1026  is->abort_request = 1;
1027  SDL_WaitThread(is->read_tid, NULL);
1028  packet_queue_destroy(&is->videoq);
1029  packet_queue_destroy(&is->audioq);
1030  packet_queue_destroy(&is->subtitleq);
1031 
1032  /* free all pictures */
1033  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
1034  free_picture(&is->pictq[i]);
1035  for (i = 0; i < SUBPICTURE_QUEUE_SIZE; i++)
1036  free_subpicture(&is->subpq[i]);
1037  SDL_DestroyMutex(is->pictq_mutex);
1038  SDL_DestroyCond(is->pictq_cond);
1039  SDL_DestroyMutex(is->subpq_mutex);
1040  SDL_DestroyCond(is->subpq_cond);
1041  SDL_DestroyCond(is->continue_read_thread);
1042 #if !CONFIG_AVFILTER
1043  sws_freeContext(is->img_convert_ctx);
1044 #endif
1045  av_free(is);
1046 }
1047 
1048 static void do_exit(VideoState *is)
1049 {
1050  if (is) {
1051  stream_close(is);
1052  }
1053  av_lockmgr_register(NULL);
1054  uninit_opts();
1055 #if CONFIG_AVFILTER
1056  av_freep(&vfilters);
1057 #endif
1058  avformat_network_deinit();
1059  if (show_status)
1060  printf("\n");
1061  SDL_Quit();
1062  av_log(NULL, AV_LOG_QUIET, "%s", "");
1063  exit(0);
1064 }
1065 
1066 static void sigterm_handler(int sig)
1067 {
1068  exit(123);
1069 }
1070 
1071 static int video_open(VideoState *is, int force_set_video_mode, VideoPicture *vp)
1072 {
1073  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1074  int w,h;
1075  SDL_Rect rect;
1076 
1077  if (is_full_screen) flags |= SDL_FULLSCREEN;
1078  else flags |= SDL_RESIZABLE;
1079 
1080  if (vp && vp->width) {
1081  calculate_display_rect(&rect, 0, 0, INT_MAX, vp->height, vp);
1082  default_width = rect.w;
1083  default_height = rect.h;
1084  }
1085 
1086  if (is_full_screen && fs_screen_width) {
1087  w = fs_screen_width;
1088  h = fs_screen_height;
1089  } else if (!is_full_screen && screen_width) {
1090  w = screen_width;
1091  h = screen_height;
1092  } else {
1093  w = default_width;
1094  h = default_height;
1095  }
1096  w = FFMIN(16383, w);
1097  if (screen && is->width == screen->w && screen->w == w
1098  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1099  return 0;
1100  screen = SDL_SetVideoMode(w, h, 0, flags);
1101  if (!screen) {
1102  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1103  do_exit(is);
1104  }
1105  if (!window_title)
1106  window_title = input_filename;
1107  SDL_WM_SetCaption(window_title, window_title);
1108 
1109  is->width = screen->w;
1110  is->height = screen->h;
1111 
1112  return 0;
1113 }
1114 
1115 /* display the current picture, if any */
1116 static void video_display(VideoState *is)
1117 {
1118  if (!screen)
1119  video_open(is, 0, NULL);
1120  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1121  video_audio_display(is);
1122  else if (is->video_st)
1123  video_image_display(is);
1124 }
1125 
1126 static double get_clock(Clock *c)
1127 {
1128  if (*c->queue_serial != c->serial)
1129  return NAN;
1130  if (c->paused) {
1131  return c->pts;
1132  } else {
1133  double time = av_gettime() / 1000000.0;
1134  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1135  }
1136 }
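/* Worked example of the drift arithmetic: if set_clock() stores pts = 10.0 at
 * wall-clock time 100.0, then pts_drift = -90.0. Querying the clock half a
 * second later (time = 100.5) with speed 1.0 returns -90.0 + 100.5 = 10.5, so
 * the clock advances in real time without needing an update on every frame.
 * With speed 1.01 the same query returns 10.5 + 0.5 * 0.01 = 10.505. */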
1137 
1138 static void set_clock_at(Clock *c, double pts, int serial, double time)
1139 {
1140  c->pts = pts;
1141  c->last_updated = time;
1142  c->pts_drift = c->pts - time;
1143  c->serial = serial;
1144 }
1145 
1146 static void set_clock(Clock *c, double pts, int serial)
1147 {
1148  double time = av_gettime() / 1000000.0;
1149  set_clock_at(c, pts, serial, time);
1150 }
1151 
1152 static void set_clock_speed(Clock *c, double speed)
1153 {
1154  set_clock(c, get_clock(c), c->serial);
1155  c->speed = speed;
1156 }
1157 
1158 static void init_clock(Clock *c, int *queue_serial)
1159 {
1160  c->speed = 1.0;
1161  c->paused = 0;
1162  c->queue_serial = queue_serial;
1163  set_clock(c, NAN, -1);
1164 }
1165 
1166 static void sync_clock_to_slave(Clock *c, Clock *slave)
1167 {
1168  double clock = get_clock(c);
1169  double slave_clock = get_clock(slave);
1170  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1171  set_clock(c, slave_clock, slave->serial);
1172 }
1173 
1174 static int get_master_sync_type(VideoState *is) {
1175  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1176  if (is->video_st)
1177  return AV_SYNC_VIDEO_MASTER;
1178  else
1179  return AV_SYNC_AUDIO_MASTER;
1180  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1181  if (is->audio_st)
1182  return AV_SYNC_AUDIO_MASTER;
1183  else
1184  return AV_SYNC_EXTERNAL_CLOCK;
1185  } else {
1186  return AV_SYNC_EXTERNAL_CLOCK;
1187  }
1188 }
1189 
1190 /* get the current master clock value */
1191 static double get_master_clock(VideoState *is)
1192 {
1193  double val;
1194 
1195  switch (get_master_sync_type(is)) {
1196  case AV_SYNC_VIDEO_MASTER:
1197  val = get_clock(&is->vidclk);
1198  break;
1199  case AV_SYNC_AUDIO_MASTER:
1200  val = get_clock(&is->audclk);
1201  break;
1202  default:
1203  val = get_clock(&is->extclk);
1204  break;
1205  }
1206  return val;
1207 }
1208 
1209 static void check_external_clock_speed(VideoState *is) {
1210  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1211  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1212  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1213  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1214  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1215  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1216  } else {
1217  double speed = is->extclk.speed;
1218  if (speed != 1.0)
1219  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1220  }
1221 }
1222 
1223 /* seek in the stream */
1224 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1225 {
1226  if (!is->seek_req) {
1227  is->seek_pos = pos;
1228  is->seek_rel = rel;
1229  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1230  if (seek_by_bytes)
1231  is->seek_flags |= AVSEEK_FLAG_BYTE;
1232  is->seek_req = 1;
1233  SDL_CondSignal(is->continue_read_thread);
1234  }
1235 }
1236 
1237 /* pause or resume the video */
1238 static void stream_toggle_pause(VideoState *is)
1239 {
1240  if (is->paused) {
1241  is->frame_timer += av_gettime() / 1000000.0 + is->vidclk.pts_drift - is->vidclk.pts;
1242  if (is->read_pause_return != AVERROR(ENOSYS)) {
1243  is->vidclk.paused = 0;
1244  }
1245  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1246  }
1247  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1248  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1249 }
1250 
1251 static void toggle_pause(VideoState *is)
1252 {
1253  stream_toggle_pause(is);
1254  is->step = 0;
1255 }
1256 
1257 static void step_to_next_frame(VideoState *is)
1258 {
1259  /* if the stream is paused unpause it, then step */
1260  if (is->paused)
1261  stream_toggle_pause(is);
1262  is->step = 1;
1263 }
1264 
1265 static double compute_target_delay(double delay, VideoState *is)
1266 {
1267  double sync_threshold, diff;
1268 
1269  /* update delay to follow master synchronisation source */
1270  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1271  /* if video is slave, we try to correct big delays by
1272  duplicating or deleting a frame */
1273  diff = get_clock(&is->vidclk) - get_master_clock(is);
1274 
1275  /* skip or repeat frame. We take into account the
1276  delay to compute the threshold. I still don't know
1277  if it is the best guess */
1278  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1279  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1280  if (diff <= -sync_threshold)
1281  delay = FFMAX(0, delay + diff);
1282  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1283  delay = delay + diff;
1284  else if (diff >= sync_threshold)
1285  delay = 2 * delay;
1286  }
1287  }
1288 
1289  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1290  delay, -diff);
1291 
1292  return delay;
1293 }
1294 
1295 static void pictq_next_picture(VideoState *is) {
1296  /* update queue size and signal for next picture */
1297  if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1298  is->pictq_rindex = 0;
1299 
1300  SDL_LockMutex(is->pictq_mutex);
1301  is->pictq_size--;
1302  SDL_CondSignal(is->pictq_cond);
1303  SDL_UnlockMutex(is->pictq_mutex);
1304 }
1305 
1306 static int pictq_prev_picture(VideoState *is) {
1307  VideoPicture *prevvp;
1308  int ret = 0;
1309  /* update queue size and signal for the previous picture */
1310  prevvp = &is->pictq[(is->pictq_rindex + VIDEO_PICTURE_QUEUE_SIZE - 1) % VIDEO_PICTURE_QUEUE_SIZE];
1311  if (prevvp->allocated && prevvp->serial == is->videoq.serial) {
1312  SDL_LockMutex(is->pictq_mutex);
1313  if (is->pictq_size < VIDEO_PICTURE_QUEUE_SIZE - 1) {
1314  if (--is->pictq_rindex == -1)
1315  is->pictq_rindex = VIDEO_PICTURE_QUEUE_SIZE - 1;
1316  is->pictq_size++;
1317  ret = 1;
1318  }
1319  SDL_CondSignal(is->pictq_cond);
1320  SDL_UnlockMutex(is->pictq_mutex);
1321  }
1322  return ret;
1323 }
1324 
1325 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1326  /* update current video pts */
1327  set_clock(&is->vidclk, pts, serial);
1328  sync_clock_to_slave(&is->extclk, &is->vidclk);
1329  is->video_current_pos = pos;
1330  is->frame_last_pts = pts;
1331 }
1332 
1333 /* called to display each frame */
1334 static void video_refresh(void *opaque, double *remaining_time)
1335 {
1336  VideoState *is = opaque;
1337  VideoPicture *vp;
1338  double time;
1339 
1340  SubPicture *sp, *sp2;
1341 
1342  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1343  check_external_clock_speed(is);
1344 
1345  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1346  time = av_gettime() / 1000000.0;
1347  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1348  video_display(is);
1349  is->last_vis_time = time;
1350  }
1351  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1352  }
1353 
1354  if (is->video_st) {
1355  int redisplay = 0;
1356  if (is->force_refresh)
1357  redisplay = pictq_prev_picture(is);
1358 retry:
1359  if (is->pictq_size == 0) {
1360  SDL_LockMutex(is->pictq_mutex);
1361  if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
1362  update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos, is->frame_last_dropped_serial);
1363  is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1364  }
1365  SDL_UnlockMutex(is->pictq_mutex);
1366  // nothing to do, no picture to display in the queue
1367  } else {
1368  double last_duration, duration, delay;
1369  /* dequeue the picture */
1370  vp = &is->pictq[is->pictq_rindex];
1371 
1372  if (vp->serial != is->videoq.serial) {
1373  pictq_next_picture(is);
1374  redisplay = 0;
1375  goto retry;
1376  }
1377 
1378  if (is->paused)
1379  goto display;
1380 
1381  /* compute nominal last_duration */
1382  last_duration = vp->pts - is->frame_last_pts;
1383  if (!isnan(last_duration) && last_duration > 0 && last_duration < is->max_frame_duration) {
1384  /* if duration of the last frame was sane, update last_duration in video state */
1385  is->frame_last_duration = last_duration;
1386  }
1387  if (redisplay)
1388  delay = 0.0;
1389  else
1390  delay = compute_target_delay(is->frame_last_duration, is);
1391 
1392  time= av_gettime()/1000000.0;
1393  if (time < is->frame_timer + delay && !redisplay) {
1394  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1395  return;
1396  }
1397 
1398  is->frame_timer += delay;
1399  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1400  is->frame_timer = time;
1401 
1402  SDL_LockMutex(is->pictq_mutex);
1403  if (!redisplay && !isnan(vp->pts))
1404  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1405  SDL_UnlockMutex(is->pictq_mutex);
1406 
1407  if (is->pictq_size > 1) {
1408  VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1409  duration = nextvp->pts - vp->pts;
1410  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1411  if (!redisplay)
1412  is->frame_drops_late++;
1413  pictq_next_picture(is);
1414  redisplay = 0;
1415  goto retry;
1416  }
1417  }
1418 
1419  if (is->subtitle_st) {
1420  while (is->subpq_size > 0) {
1421  sp = &is->subpq[is->subpq_rindex];
1422 
1423  if (is->subpq_size > 1)
1424  sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1425  else
1426  sp2 = NULL;
1427 
1428  if (sp->serial != is->subtitleq.serial
1429  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1430  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1431  {
1432  free_subpicture(sp);
1433 
1434  /* update queue size and signal for next picture */
1435  if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1436  is->subpq_rindex = 0;
1437 
1438  SDL_LockMutex(is->subpq_mutex);
1439  is->subpq_size--;
1440  SDL_CondSignal(is->subpq_cond);
1441  SDL_UnlockMutex(is->subpq_mutex);
1442  } else {
1443  break;
1444  }
1445  }
1446  }
1447 
1448 display:
1449  /* display picture */
1450  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1451  video_display(is);
1452 
1453  pictq_next_picture(is);
1454 
1455  if (is->step && !is->paused)
1456  stream_toggle_pause(is);
1457  }
1458  }
1459  is->force_refresh = 0;
1460  if (show_status) {
1461  static int64_t last_time;
1462  int64_t cur_time;
1463  int aqsize, vqsize, sqsize;
1464  double av_diff;
1465 
1466  cur_time = av_gettime();
1467  if (!last_time || (cur_time - last_time) >= 30000) {
1468  aqsize = 0;
1469  vqsize = 0;
1470  sqsize = 0;
1471  if (is->audio_st)
1472  aqsize = is->audioq.size;
1473  if (is->video_st)
1474  vqsize = is->videoq.size;
1475  if (is->subtitle_st)
1476  sqsize = is->subtitleq.size;
1477  av_diff = 0;
1478  if (is->audio_st && is->video_st)
1479  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1480  else if (is->video_st)
1481  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1482  else if (is->audio_st)
1483  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1484  av_log(NULL, AV_LOG_INFO,
1485  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1486  get_master_clock(is),
1487  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1488  av_diff,
1489  is->frame_drops_early + is->frame_drops_late,
1490  aqsize / 1024,
1491  vqsize / 1024,
1492  sqsize,
1493  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1494  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1495  fflush(stdout);
1496  last_time = cur_time;
1497  }
1498  }
1499 }
1500 
1501 /* allocate a picture (needs to do that in main thread to avoid
1502  potential locking problems) */
1503 static void alloc_picture(VideoState *is)
1504 {
1505  VideoPicture *vp;
1506  int64_t bufferdiff;
1507 
1508  vp = &is->pictq[is->pictq_windex];
1509 
1510  free_picture(vp);
1511 
1512  video_open(is, 0, vp);
1513 
1514  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1515  SDL_YV12_OVERLAY,
1516  screen);
1517  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1518  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1519  /* SDL allocates a buffer smaller than requested if the video
1520  * overlay hardware is unable to support the requested size. */
1521  av_log(NULL, AV_LOG_FATAL,
1522  "Error: the video system does not support an image\n"
1523  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1524  "to reduce the image size.\n", vp->width, vp->height );
1525  do_exit(is);
1526  }
1527 
1528  SDL_LockMutex(is->pictq_mutex);
1529  vp->allocated = 1;
1530  SDL_CondSignal(is->pictq_cond);
1531  SDL_UnlockMutex(is->pictq_mutex);
1532 }
1533 
1534 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1535  int i, width, height;
1536  Uint8 *p, *maxp;
1537  for (i = 0; i < 3; i++) {
1538  width = bmp->w;
1539  height = bmp->h;
1540  if (i > 0) {
1541  width >>= 1;
1542  height >>= 1;
1543  }
1544  if (bmp->pitches[i] > width) {
1545  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1546  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1547  *(p+1) = *p;
1548  }
1549  }
1550 }
1551 
1552 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos, int serial)
1553 {
1554  VideoPicture *vp;
1555 
1556 #if defined(DEBUG_SYNC) && 0
1557  printf("frame_type=%c pts=%0.3f\n",
1558  av_get_picture_type_char(src_frame->pict_type), pts);
1559 #endif
1560 
1561  /* wait until we have space to put a new picture */
1562  SDL_LockMutex(is->pictq_mutex);
1563 
1564  /* keep the last already displayed picture in the queue */
1565  while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE - 1 &&
1566  !is->videoq.abort_request) {
1567  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1568  }
1569  SDL_UnlockMutex(is->pictq_mutex);
1570 
1571  if (is->videoq.abort_request)
1572  return -1;
1573 
1574  vp = &is->pictq[is->pictq_windex];
1575 
1576  vp->sar = src_frame->sample_aspect_ratio;
1577 
1578  /* alloc or resize hardware picture buffer */
1579  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1580  vp->width != src_frame->width ||
1581  vp->height != src_frame->height) {
1582  SDL_Event event;
1583 
1584  vp->allocated = 0;
1585  vp->reallocate = 0;
1586  vp->width = src_frame->width;
1587  vp->height = src_frame->height;
1588 
1589  /* the allocation must be done in the main thread to avoid
1590  locking problems. */
1591  event.type = FF_ALLOC_EVENT;
1592  event.user.data1 = is;
1593  SDL_PushEvent(&event);
1594 
1595  /* wait until the picture is allocated */
1596  SDL_LockMutex(is->pictq_mutex);
1597  while (!vp->allocated && !is->videoq.abort_request) {
1598  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1599  }
1600  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1601  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1602  while (!vp->allocated) {
1603  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1604  }
1605  }
1606  SDL_UnlockMutex(is->pictq_mutex);
1607 
1608  if (is->videoq.abort_request)
1609  return -1;
1610  }
1611 
1612  /* if the frame is not skipped, then display it */
1613  if (vp->bmp) {
1614  AVPicture pict = { { 0 } };
1615 
1616  /* get a pointer on the bitmap */
1617  SDL_LockYUVOverlay (vp->bmp);
1618 
1619  pict.data[0] = vp->bmp->pixels[0];
1620  pict.data[1] = vp->bmp->pixels[2];
1621  pict.data[2] = vp->bmp->pixels[1];
1622 
1623  pict.linesize[0] = vp->bmp->pitches[0];
1624  pict.linesize[1] = vp->bmp->pitches[2];
1625  pict.linesize[2] = vp->bmp->pitches[1];
1626 
1627 #if CONFIG_AVFILTER
1628  // FIXME use direct rendering
1629  av_picture_copy(&pict, (AVPicture *)src_frame,
1630  src_frame->format, vp->width, vp->height);
1631 #else
1632  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1633  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1634  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1635  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1636  if (is->img_convert_ctx == NULL) {
1637  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1638  exit(1);
1639  }
1640  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1641  0, vp->height, pict.data, pict.linesize);
1642 #endif
1643  /* workaround SDL PITCH_WORKAROUND */
1644  duplicate_right_border_pixels(vp->bmp);
1645  /* update the bitmap content */
1646  SDL_UnlockYUVOverlay(vp->bmp);
1647 
1648  vp->pts = pts;
1649  vp->pos = pos;
1650  vp->serial = serial;
1651 
1652  /* now we can update the picture count */
1653  if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1654  is->pictq_windex = 0;
1655  SDL_LockMutex(is->pictq_mutex);
1656  is->pictq_size++;
1657  SDL_UnlockMutex(is->pictq_mutex);
1658  }
1659  return 0;
1660 }
1661 
1662 static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
1663 {
1664  int got_picture;
1665 
1666  if (packet_queue_get(&is->videoq, pkt, 1, serial) < 0)
1667  return -1;
1668 
1669  if (pkt->data == flush_pkt.data) {
1670  avcodec_flush_buffers(is->video_st->codec);
1671 
1672  SDL_LockMutex(is->pictq_mutex);
1673  // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1674  while (is->pictq_size && !is->videoq.abort_request) {
1675  SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1676  }
1677  is->video_current_pos = -1;
1678  is->frame_last_pts = AV_NOPTS_VALUE;
1679  is->frame_last_duration = 0;
1680  is->frame_timer = (double)av_gettime() / 1000000.0;
1681  is->frame_last_dropped_pts = AV_NOPTS_VALUE;
1682  SDL_UnlockMutex(is->pictq_mutex);
1683  return 0;
1684  }
1685 
1686  if(avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt) < 0)
1687  return 0;
1688 
1689  if (!got_picture && !pkt->data)
1690  is->video_finished = *serial;
1691 
1692  if (got_picture) {
1693  int ret = 1;
1694  double dpts = NAN;
1695 
1696  if (decoder_reorder_pts == -1) {
1697  frame->pts = av_frame_get_best_effort_timestamp(frame);
1698  } else if (decoder_reorder_pts) {
1699  frame->pts = frame->pkt_pts;
1700  } else {
1701  frame->pts = frame->pkt_dts;
1702  }
1703 
1704  if (frame->pts != AV_NOPTS_VALUE)
1705  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1706 
1707  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1708 
1709  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1710  SDL_LockMutex(is->pictq_mutex);
1711  if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE) {
1712  double clockdiff = get_clock(&is->vidclk) - get_master_clock(is);
1713  double ptsdiff = dpts - is->frame_last_pts;
1714  if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
1715  !isnan(ptsdiff) && ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
1716  clockdiff + ptsdiff - is->frame_last_filter_delay < 0 &&
1717  is->videoq.nb_packets) {
1718  is->frame_last_dropped_pos = pkt->pos;
1719  is->frame_last_dropped_pts = dpts;
1720  is->frame_last_dropped_serial = *serial;
1721  is->frame_drops_early++;
1722  av_frame_unref(frame);
1723  ret = 0;
1724  }
1725  }
1726  SDL_UnlockMutex(is->pictq_mutex);
1727  }
1728 
1729  return ret;
1730  }
1731  return 0;
1732 }
1733 
1734 #if CONFIG_AVFILTER
1735 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1736  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1737 {
1738  int ret;
1739  AVFilterInOut *outputs = NULL, *inputs = NULL;
1740 
1741  if (filtergraph) {
1742  outputs = avfilter_inout_alloc();
1743  inputs = avfilter_inout_alloc();
1744  if (!outputs || !inputs) {
1745  ret = AVERROR(ENOMEM);
1746  goto fail;
1747  }
1748 
1749  outputs->name = av_strdup("in");
1750  outputs->filter_ctx = source_ctx;
1751  outputs->pad_idx = 0;
1752  outputs->next = NULL;
1753 
1754  inputs->name = av_strdup("out");
1755  inputs->filter_ctx = sink_ctx;
1756  inputs->pad_idx = 0;
1757  inputs->next = NULL;
1758 
1759  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1760  goto fail;
1761  } else {
1762  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1763  goto fail;
1764  }
1765 
1766  ret = avfilter_graph_config(graph, NULL);
1767 fail:
1768  avfilter_inout_free(&outputs);
1769  avfilter_inout_free(&inputs);
1770  return ret;
1771 }
1772 
1773 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1774 {
1775  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1776  char sws_flags_str[128];
1777  char buffersrc_args[256];
1778  int ret;
1779  AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_crop;
1780  AVCodecContext *codec = is->video_st->codec;
1781  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1782 
1783  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1784  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1785  graph->scale_sws_opts = av_strdup(sws_flags_str);
1786 
1787  snprintf(buffersrc_args, sizeof(buffersrc_args),
1788  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1789  frame->width, frame->height, frame->format,
1790  is->video_st->time_base.num, is->video_st->time_base.den,
1791  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1792  if (fr.num && fr.den)
1793  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1794 
1795  if ((ret = avfilter_graph_create_filter(&filt_src,
1796  avfilter_get_by_name("buffer"),
1797  "ffplay_buffer", buffersrc_args, NULL,
1798  graph)) < 0)
1799  goto fail;
1800 
1801  ret = avfilter_graph_create_filter(&filt_out,
1802  avfilter_get_by_name("buffersink"),
1803  "ffplay_buffersink", NULL, NULL, graph);
1804  if (ret < 0)
1805  goto fail;
1806 
1807  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1808  goto fail;
1809 
1810  /* SDL YUV code is not handling odd width/height for some driver
1811  * combinations, therefore we crop the picture to an even width/height. */
1812  if ((ret = avfilter_graph_create_filter(&filt_crop,
1813  avfilter_get_by_name("crop"),
1814  "ffplay_crop", "floor(in_w/2)*2:floor(in_h/2)*2", NULL, graph)) < 0)
1815  goto fail;
1816  if ((ret = avfilter_link(filt_crop, 0, filt_out, 0)) < 0)
1817  goto fail;
1818 
1819  if ((ret = configure_filtergraph(graph, vfilters, filt_src, filt_crop)) < 0)
1820  goto fail;
1821 
1822  is->in_video_filter = filt_src;
1823  is->out_video_filter = filt_out;
1824 
1825 fail:
1826  return ret;
1827 }
1828 
1829 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1830 {
1831  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1832  int sample_rates[2] = { 0, -1 };
1833  int64_t channel_layouts[2] = { 0, -1 };
1834  int channels[2] = { 0, -1 };
1835  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1836  char aresample_swr_opts[512] = "";
1837  AVDictionaryEntry *e = NULL;
1838  char asrc_args[256];
1839  int ret;
1840 
1841  avfilter_graph_free(&is->agraph);
1842  if (!(is->agraph = avfilter_graph_alloc()))
1843  return AVERROR(ENOMEM);
1844 
1845  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1846  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1847  if (strlen(aresample_swr_opts))
1848  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1849  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1850 
1851  ret = snprintf(asrc_args, sizeof(asrc_args),
1852  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1853  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1854  is->audio_filter_src.channels,
1855  1, is->audio_filter_src.freq);
1856  if (is->audio_filter_src.channel_layout)
1857  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1858  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1859 
1860  ret = avfilter_graph_create_filter(&filt_asrc,
1861  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1862  asrc_args, NULL, is->agraph);
1863  if (ret < 0)
1864  goto end;
1865 
1866 
1867  ret = avfilter_graph_create_filter(&filt_asink,
1868  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1869  NULL, NULL, is->agraph);
1870  if (ret < 0)
1871  goto end;
1872 
1873  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1874  goto end;
1875  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1876  goto end;
1877 
1878  if (force_output_format) {
1879  channel_layouts[0] = is->audio_tgt.channel_layout;
1880  channels [0] = is->audio_tgt.channels;
1881  sample_rates [0] = is->audio_tgt.freq;
1882  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1883  goto end;
1884  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1885  goto end;
1886  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1887  goto end;
1888  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1889  goto end;
1890  }
1891 
1892 
1893  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1894  goto end;
1895 
1896  is->in_audio_filter = filt_asrc;
1897  is->out_audio_filter = filt_asink;
1898 
1899 end:
1900  if (ret < 0)
1901  avfilter_graph_free(&is->agraph);
1902  return ret;
1903 }
1904 #endif /* CONFIG_AVFILTER */
1905 
1906 static int video_thread(void *arg)
1907 {
1908  AVPacket pkt = { 0 };
1909  VideoState *is = arg;
1910  AVFrame *frame = av_frame_alloc();
1911  double pts;
1912  int ret;
1913  int serial = 0;
1914 
1915 #if CONFIG_AVFILTER
1916  AVFilterGraph *graph = avfilter_graph_alloc();
1917  AVFilterContext *filt_out = NULL, *filt_in = NULL;
1918  int last_w = 0;
1919  int last_h = 0;
1920  enum AVPixelFormat last_format = -2;
1921  int last_serial = -1;
1922 #endif
1923 
1924  for (;;) {
1925  while (is->paused && !is->videoq.abort_request)
1926  SDL_Delay(10);
1927 
1929  av_free_packet(&pkt);
1930 
1931  ret = get_video_frame(is, frame, &pkt, &serial);
1932  if (ret < 0)
1933  goto the_end;
1934  if (!ret)
1935  continue;
1936 
1937 #if CONFIG_AVFILTER
1938  if ( last_w != frame->width
1939  || last_h != frame->height
1940  || last_format != frame->format
1941  || last_serial != serial) {
1942  av_log(NULL, AV_LOG_DEBUG,
1943  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
1944  last_w, last_h,
1945  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
1946  frame->width, frame->height,
1947  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), serial);
1948  avfilter_graph_free(&graph);
1949  graph = avfilter_graph_alloc();
1950  if ((ret = configure_video_filters(graph, is, vfilters, frame)) < 0) {
1951  SDL_Event event;
1952  event.type = FF_QUIT_EVENT;
1953  event.user.data1 = is;
1954  SDL_PushEvent(&event);
1955  av_free_packet(&pkt);
1956  goto the_end;
1957  }
1958  filt_in = is->in_video_filter;
1959  filt_out = is->out_video_filter;
1960  last_w = frame->width;
1961  last_h = frame->height;
1962  last_format = frame->format;
1963  last_serial = serial;
1964  }
1965 
1966  ret = av_buffersrc_add_frame(filt_in, frame);
1967  if (ret < 0)
1968  goto the_end;
1969  av_frame_unref(frame);
1971  av_free_packet(&pkt);
1972 
1973  while (ret >= 0) {
1974  is->frame_last_returned_time = av_gettime() / 1000000.0;
1975 
1976  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
1977  if (ret < 0) {
1978  if (ret == AVERROR_EOF)
1979  is->video_finished = serial;
1980  ret = 0;
1981  break;
1982  }
1983 
1984  is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
1985  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
1986  is->frame_last_filter_delay = 0;
1987 
1988  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(filt_out->inputs[0]->time_base);
1989  ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1990  av_frame_unref(frame);
1991  }
1992 #else
1993  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(is->video_st->time_base);
1994  ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
1995  av_frame_unref(frame);
1996 #endif
1997 
1998  if (ret < 0)
1999  goto the_end;
2000  }
2001  the_end:
2003 #if CONFIG_AVFILTER
2004  avfilter_graph_free(&graph);
2005 #endif
2006  av_free_packet(&pkt);
2007  av_frame_free(&frame);
2008  return 0;
2009 }
2010 
2011 static int subtitle_thread(void *arg)
2012 {
2013  VideoState *is = arg;
2014  SubPicture *sp;
2015  AVPacket pkt1, *pkt = &pkt1;
2016  int got_subtitle;
2017  int serial;
2018  double pts;
2019  int i, j;
2020  int r, g, b, y, u, v, a;
2021 
2022  for (;;) {
2023  while (is->paused && !is->subtitleq.abort_request) {
2024  SDL_Delay(10);
2025  }
2026  if (packet_queue_get(&is->subtitleq, pkt, 1, &serial) < 0)
2027  break;
2028 
2029  if (pkt->data == flush_pkt.data) {
2030  avcodec_flush_buffers(is->subtitle_st->codec);
2031  continue;
2032  }
2033  SDL_LockMutex(is->subpq_mutex);
2034  while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
2035  !is->subtitleq.abort_request) {
2036  SDL_CondWait(is->subpq_cond, is->subpq_mutex);
2037  }
2038  SDL_UnlockMutex(is->subpq_mutex);
2039 
2040  if (is->subtitleq.abort_request)
2041  return 0;
2042 
2043  sp = &is->subpq[is->subpq_windex];
2044 
2045  /* NOTE: ipts is the PTS of the _first_ picture beginning in
2046  this packet, if any */
2047  pts = 0;
2048  if (pkt->pts != AV_NOPTS_VALUE)
2049  pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
2050 
2051  avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
2052  &got_subtitle, pkt);
2053  if (got_subtitle && sp->sub.format == 0) {
2054  if (sp->sub.pts != AV_NOPTS_VALUE)
2055  pts = sp->sub.pts / (double)AV_TIME_BASE;
2056  sp->pts = pts;
2057  sp->serial = serial;
2058 
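 /* convert the bitmap subtitle palette in place from RGBA to YUVA so the
    rectangles can later be blended directly onto the YUV overlay */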
2059  for (i = 0; i < sp->sub.num_rects; i++)
2060  {
2061  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2062  {
2063  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2064  y = RGB_TO_Y_CCIR(r, g, b);
2065  u = RGB_TO_U_CCIR(r, g, b, 0);
2066  v = RGB_TO_V_CCIR(r, g, b, 0);
2067  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2068  }
2069  }
2070 
2071  /* now we can update the picture count */
2072  if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
2073  is->subpq_windex = 0;
2074  SDL_LockMutex(is->subpq_mutex);
2075  is->subpq_size++;
2076  SDL_UnlockMutex(is->subpq_mutex);
2077  } else if (got_subtitle) {
2078  avsubtitle_free(&sp->sub);
2079  }
2080  av_free_packet(pkt);
2081  }
2082  return 0;
2083 }
2084 
2085 /* copy samples for viewing in editor window */
2086 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2087 {
2088  int size, len;
2089 
2090  size = samples_size / sizeof(short);
2091  while (size > 0) {
2092  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2093  if (len > size)
2094  len = size;
2095  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2096  samples += len;
2097  is->sample_array_index += len;
2098  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2099  is->sample_array_index = 0;
2100  size -= len;
2101  }
2102 }
2103 
2104 /* return the wanted number of samples to get better sync if sync_type is video
2105  * or external master clock */
2106 static int synchronize_audio(VideoState *is, int nb_samples)
2107 {
2108  int wanted_nb_samples = nb_samples;
2109 
2110  /* if not master, then we try to remove or add samples to correct the clock */
2111  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2112  double diff, avg_diff;
2113  int min_nb_samples, max_nb_samples;
2114 
2115  diff = get_clock(&is->audclk) - get_master_clock(is);
2116 
2117  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2118  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2119  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2120  /* not enough measures to have a correct estimate */
2121  is->audio_diff_avg_count++;
2122  } else {
2123  /* estimate the A-V difference */
2124  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
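 /* audio_diff_cum is a running geometric series of past A-V differences;
    scaling it by (1 - coef) turns it into an exponentially weighted average
    over roughly the last AUDIO_DIFF_AVG_NB measurements */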
2125 
2126  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2127  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2128  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2129  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2130  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
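 /* clamp the correction to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the
    nominal frame size so the resulting speed change stays inaudible */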
2131  }
2132  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2133  diff, avg_diff, wanted_nb_samples - nb_samples,
2134  is->audio_clock, is->audio_diff_threshold);
2135  }
2136  } else {
2137  /* too big difference : may be initial PTS errors, so
2138  reset A-V filter */
2139  is->audio_diff_avg_count = 0;
2140  is->audio_diff_cum = 0;
2141  }
2142  }
2143 
2144  return wanted_nb_samples;
2145 }
2146 
2147 /**
2148  * Decode one audio frame and return its uncompressed size.
2149  *
2150  * The processed audio frame is decoded, converted if required, and
2151  * stored in is->audio_buf, with size in bytes given by the return
2152  * value.
2153  */
2154 static int audio_decode_frame(VideoState *is)
2155 {
2156  AVPacket *pkt_temp = &is->audio_pkt_temp;
2157  AVPacket *pkt = &is->audio_pkt;
2158  AVCodecContext *dec = is->audio_st->codec;
2159  int len1, data_size, resampled_data_size;
2160  int64_t dec_channel_layout;
2161  int got_frame;
2162  av_unused double audio_clock0;
2163  int wanted_nb_samples;
2164  AVRational tb;
2165  int ret;
2166  int reconfigure;
2167 
2168  for (;;) {
2169  /* NOTE: the audio packet can contain several frames */
2170  while (pkt_temp->stream_index != -1 || is->audio_buf_frames_pending) {
2171  if (!is->frame) {
2172  if (!(is->frame = avcodec_alloc_frame()))
2173  return AVERROR(ENOMEM);
2174  } else {
2175  av_frame_unref(is->frame);
2176  avcodec_get_frame_defaults(is->frame);
2177  }
2178 
2179  if (is->audioq.serial != is->audio_pkt_temp_serial)
2180  break;
2181 
2182  if (is->paused)
2183  return -1;
2184 
2185  if (!is->audio_buf_frames_pending) {
2186  len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
2187  if (len1 < 0) {
2188  /* if error, we skip the frame */
2189  pkt_temp->size = 0;
2190  break;
2191  }
2192 
2193  pkt_temp->dts =
2194  pkt_temp->pts = AV_NOPTS_VALUE;
2195  pkt_temp->data += len1;
2196  pkt_temp->size -= len1;
2197  if (pkt_temp->data && pkt_temp->size <= 0 || !pkt_temp->data && !got_frame)
2198  pkt_temp->stream_index = -1;
2199  if (!pkt_temp->data && !got_frame)
2200  is->audio_finished = is->audio_pkt_temp_serial;
2201 
2202  if (!got_frame)
2203  continue;
2204 
2205  tb = (AVRational){1, is->frame->sample_rate};
2206  if (is->frame->pts != AV_NOPTS_VALUE)
2207  is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
2208  else if (is->frame->pkt_pts != AV_NOPTS_VALUE)
2209  is->frame->pts = av_rescale_q(is->frame->pkt_pts, is->audio_st->time_base, tb);
2210  else if (is->audio_frame_next_pts != AV_NOPTS_VALUE)
2211 #if CONFIG_AVFILTER
2212  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_filter_src.freq}, tb);
2213 #else
2214  is->frame->pts = av_rescale_q(is->audio_frame_next_pts, (AVRational){1, is->audio_src.freq}, tb);
2215 #endif
2216 
2217  if (is->frame->pts != AV_NOPTS_VALUE)
2218  is->audio_frame_next_pts = is->frame->pts + is->frame->nb_samples;
2219 
2220 #if CONFIG_AVFILTER
2221  dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
2222 
2223  reconfigure =
2224  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2225  is->frame->format, av_frame_get_channels(is->frame)) ||
2226  is->audio_filter_src.channel_layout != dec_channel_layout ||
2227  is->audio_filter_src.freq != is->frame->sample_rate ||
2228  is->audio_pkt_temp_serial != is->audio_last_serial;
2229 
2230  if (reconfigure) {
2231  char buf1[1024], buf2[1024];
2232  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2233  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2234  av_log(NULL, AV_LOG_DEBUG,
2235  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2236  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, is->audio_last_serial,
2237  is->frame->sample_rate, av_frame_get_channels(is->frame), av_get_sample_fmt_name(is->frame->format), buf2, is->audio_pkt_temp_serial);
2238 
2239  is->audio_filter_src.fmt = is->frame->format;
2240  is->audio_filter_src.channels = av_frame_get_channels(is->frame);
2241  is->audio_filter_src.channel_layout = dec_channel_layout;
2242  is->audio_filter_src.freq = is->frame->sample_rate;
2243  is->audio_last_serial = is->audio_pkt_temp_serial;
2244 
2245  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2246  return ret;
2247  }
2248 
2249  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
2250  return ret;
2251  av_frame_unref(is->frame);
2252 #endif
2253  }
2254 #if CONFIG_AVFILTER
2255  if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
2256  if (ret == AVERROR(EAGAIN)) {
2257  is->audio_buf_frames_pending = 0;
2258  continue;
2259  }
2260  if (ret == AVERROR_EOF)
2261  is->audio_finished = is->audio_pkt_temp_serial;
2262  return ret;
2263  }
2264  is->audio_buf_frames_pending = 1;
2265  tb = is->out_audio_filter->inputs[0]->time_base;
2266 #endif
2267 
2268  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
2269  is->frame->nb_samples,
2270  is->frame->format, 1);
2271 
2272  dec_channel_layout =
2273  (is->frame->channel_layout && av_frame_get_channels(is->frame) == av_get_channel_layout_nb_channels(is->frame->channel_layout)) ?
2274  is->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(is->frame));
2275  wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
2276 
2277  if (is->frame->format != is->audio_src.fmt ||
2278  dec_channel_layout != is->audio_src.channel_layout ||
2279  is->frame->sample_rate != is->audio_src.freq ||
2280  (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
2281  swr_free(&is->swr_ctx);
2282  is->swr_ctx = swr_alloc_set_opts(NULL,
2283  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2284  dec_channel_layout, is->frame->format, is->frame->sample_rate,
2285  0, NULL);
2286  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2287  av_log(NULL, AV_LOG_ERROR,
2288  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2289  is->frame->sample_rate, av_get_sample_fmt_name(is->frame->format), av_frame_get_channels(is->frame),
2290  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2291  break;
2292  }
2293  is->audio_src.channel_layout = dec_channel_layout;
2294  is->audio_src.channels = av_frame_get_channels(is->frame);
2295  is->audio_src.freq = is->frame->sample_rate;
2296  is->audio_src.fmt = is->frame->format;
2297  }
2298 
2299  if (is->swr_ctx) {
2300  const uint8_t **in = (const uint8_t **)is->frame->extended_data;
2301  uint8_t **out = &is->audio_buf1;
2302  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate + 256;
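 /* out_count is the input count rescaled to the target rate plus headroom
    for samples still buffered inside the resampler and for sync compensation */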
2303  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2304  int len2;
2305  if (out_size < 0) {
2306  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2307  break;
2308  }
2309  if (wanted_nb_samples != is->frame->nb_samples) {
2310  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt.freq / is->frame->sample_rate,
2311  wanted_nb_samples * is->audio_tgt.freq / is->frame->sample_rate) < 0) {
2312  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2313  break;
2314  }
2315  }
2316  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2317  if (!is->audio_buf1)
2318  return AVERROR(ENOMEM);
2319  len2 = swr_convert(is->swr_ctx, out, out_count, in, is->frame->nb_samples);
2320  if (len2 < 0) {
2321  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2322  break;
2323  }
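 /* if the resampler filled the whole output buffer the conversion may have
    been truncated, so warn and reinitialize it */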
2324  if (len2 == out_count) {
2325  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2326  swr_init(is->swr_ctx);
2327  }
2328  is->audio_buf = is->audio_buf1;
2329  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2330  } else {
2331  is->audio_buf = is->frame->data[0];
2332  resampled_data_size = data_size;
2333  }
2334 
2335  audio_clock0 = is->audio_clock;
2336  /* update the audio clock with the pts */
2337  if (is->frame->pts != AV_NOPTS_VALUE)
2338  is->audio_clock = is->frame->pts * av_q2d(tb) + (double) is->frame->nb_samples / is->frame->sample_rate;
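 /* the audio clock points at the end of the decoded frame: its pts in
    seconds plus the duration of the samples it contains */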
2339  else
2340  is->audio_clock = NAN;
2342 #ifdef DEBUG
2343  {
2344  static double last_clock;
2345  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2346  is->audio_clock - last_clock,
2347  is->audio_clock, audio_clock0);
2348  last_clock = is->audio_clock;
2349  }
2350 #endif
2351  return resampled_data_size;
2352  }
2353 
2354  /* free the current packet */
2355  if (pkt->data)
2356  av_free_packet(pkt);
2357  memset(pkt_temp, 0, sizeof(*pkt_temp));
2358  pkt_temp->stream_index = -1;
2359 
2360  if (is->audioq.abort_request) {
2361  return -1;
2362  }
2363 
2364  if (is->audioq.nb_packets == 0)
2365  SDL_CondSignal(is->continue_read_thread);
2366 
2367  /* read next packet */
2368  if ((packet_queue_get(&is->audioq, pkt, 1, &is->audio_pkt_temp_serial)) < 0)
2369  return -1;
2370 
2371  if (pkt->data == flush_pkt.data) {
2372  avcodec_flush_buffers(dec);
2373  is->audio_buf_frames_pending = 0;
2374  is->audio_frame_next_pts = AV_NOPTS_VALUE;
2375  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek)
2376  is->audio_frame_next_pts = is->audio_st->start_time;
2377  }
2378 
2379  *pkt_temp = *pkt;
2380  }
2381 }
2382 
2383 /* prepare a new audio buffer */
2384 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2385 {
2386  VideoState *is = opaque;
2387  int audio_size, len1;
2388  int bytes_per_sec;
2389  int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, 1, is->audio_tgt.fmt, 1);
2390 
2391  audio_callback_time = av_gettime();
2392 
2393  while (len > 0) {
2394  if (is->audio_buf_index >= is->audio_buf_size) {
2395  audio_size = audio_decode_frame(is);
2396  if (audio_size < 0) {
2397  /* if error, just output silence */
2398  is->audio_buf = is->silence_buf;
2399  is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
2400  } else {
2401  if (is->show_mode != SHOW_MODE_VIDEO)
2402  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2403  is->audio_buf_size = audio_size;
2404  }
2405  is->audio_buf_index = 0;
2406  }
2407  len1 = is->audio_buf_size - is->audio_buf_index;
2408  if (len1 > len)
2409  len1 = len;
2410  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2411  len -= len1;
2412  stream += len1;
2413  is->audio_buf_index += len1;
2414  }
2415  bytes_per_sec = is->audio_tgt.freq * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2416  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2417  /* Let's assume the audio driver that is used by SDL has two periods. */
2418  if (!isnan(is->audio_clock)) {
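 /* the data already generated but not yet played by SDL (two hardware
    buffers plus the unwritten part of audio_buf) still lies ahead of the
    clock, so subtract its duration in seconds */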
2419  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2420  sync_clock_to_slave(&is->extclk, &is->audclk);
2421  }
2422 }
2423 
2424 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2425 {
2426  SDL_AudioSpec wanted_spec, spec;
2427  const char *env;
2428  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
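 /* fallback table: indexed by the channel count that SDL rejected, it gives
    the next count to try, stepping down until 0 means give up */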
2429 
2430  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2431  if (env) {
2432  wanted_nb_channels = atoi(env);
2433  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2434  }
2435  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2436  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2437  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2438  }
2439  wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2440  wanted_spec.freq = wanted_sample_rate;
2441  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2442  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2443  return -1;
2444  }
2445  wanted_spec.format = AUDIO_S16SYS;
2446  wanted_spec.silence = 0;
2447  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2448  wanted_spec.callback = sdl_audio_callback;
2449  wanted_spec.userdata = opaque;
2450  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2451  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels): %s\n", wanted_spec.channels, SDL_GetError());
2452  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2453  if (!wanted_spec.channels) {
2454  av_log(NULL, AV_LOG_ERROR,
2455  "No more channel combinations to try, audio open failed\n");
2456  return -1;
2457  }
2458  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2459  }
2460  if (spec.format != AUDIO_S16SYS) {
2461  av_log(NULL, AV_LOG_ERROR,
2462  "SDL advised audio format %d is not supported!\n", spec.format);
2463  return -1;
2464  }
2465  if (spec.channels != wanted_spec.channels) {
2466  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2467  if (!wanted_channel_layout) {
2468  av_log(NULL, AV_LOG_ERROR,
2469  "SDL advised channel count %d is not supported!\n", spec.channels);
2470  return -1;
2471  }
2472  }
2473 
2474  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2475  audio_hw_params->freq = spec.freq;
2476  audio_hw_params->channel_layout = wanted_channel_layout;
2477  audio_hw_params->channels = spec.channels;
2478  return spec.size;
2479 }
2480 
2481 /* open a given stream. Return 0 if OK */
2482 static int stream_component_open(VideoState *is, int stream_index)
2483 {
2484  AVFormatContext *ic = is->ic;
2485  AVCodecContext *avctx;
2486  AVCodec *codec;
2487  const char *forced_codec_name = NULL;
2488  AVDictionary *opts;
2489  AVDictionaryEntry *t = NULL;
2490  int sample_rate, nb_channels;
2491  int64_t channel_layout;
2492  int ret;
2493  int stream_lowres = lowres;
2494 
2495  if (stream_index < 0 || stream_index >= ic->nb_streams)
2496  return -1;
2497  avctx = ic->streams[stream_index]->codec;
2498 
2499  codec = avcodec_find_decoder(avctx->codec_id);
2500 
2501  switch(avctx->codec_type){
2502  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2503  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2504  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2505  }
2506  if (forced_codec_name)
2507  codec = avcodec_find_decoder_by_name(forced_codec_name);
2508  if (!codec) {
2509  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2510  "No codec could be found with name '%s'\n", forced_codec_name);
2511  else av_log(NULL, AV_LOG_WARNING,
2512  "No codec could be found with id %d\n", avctx->codec_id);
2513  return -1;
2514  }
2515 
2516  avctx->codec_id = codec->id;
2517  avctx->workaround_bugs = workaround_bugs;
2518  if(stream_lowres > av_codec_get_max_lowres(codec)){
2519  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2520  av_codec_get_max_lowres(codec));
2521  stream_lowres = av_codec_get_max_lowres(codec);
2522  }
2523  av_codec_set_lowres(avctx, stream_lowres);
2524  avctx->error_concealment = error_concealment;
2525 
2526  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2527  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2528  if(codec->capabilities & CODEC_CAP_DR1)
2529  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2530 
2531  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2532  if (!av_dict_get(opts, "threads", NULL, 0))
2533  av_dict_set(&opts, "threads", "auto", 0);
2534  if (stream_lowres)
2535  av_dict_set(&opts, "lowres", av_asprintf("%d", stream_lowres), AV_DICT_DONT_STRDUP_VAL);
2536  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2537  av_dict_set(&opts, "refcounted_frames", "1", 0);
2538  if (avcodec_open2(avctx, codec, &opts) < 0)
2539  return -1;
2540  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2541  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2542  return AVERROR_OPTION_NOT_FOUND;
2543  }
2544 
2545  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2546  switch (avctx->codec_type) {
2547  case AVMEDIA_TYPE_AUDIO:
2548 #if CONFIG_AVFILTER
2549  {
2550  AVFilterLink *link;
2551 
2552  is->audio_filter_src.freq = avctx->sample_rate;
2553  is->audio_filter_src.channels = avctx->channels;
2554  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2555  is->audio_filter_src.fmt = avctx->sample_fmt;
2556  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2557  return ret;
2558  link = is->out_audio_filter->inputs[0];
2559  sample_rate = link->sample_rate;
2560  nb_channels = link->channels;
2561  channel_layout = link->channel_layout;
2562  }
2563 #else
2564  sample_rate = avctx->sample_rate;
2565  nb_channels = avctx->channels;
2566  channel_layout = avctx->channel_layout;
2567 #endif
2568 
2569  /* prepare audio output */
2570  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2571  return ret;
2572  is->audio_hw_buf_size = ret;
2573  is->audio_src = is->audio_tgt;
2574  is->audio_buf_size = 0;
2575  is->audio_buf_index = 0;
2576 
2577  /* init averaging filter */
2578  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
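 /* with this coefficient the weight of a measurement decays to 1% after
    AUDIO_DIFF_AVG_NB further samples of the A-V difference */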
2579  is->audio_diff_avg_count = 0;
2580  /* since we do not have a precise enough audio FIFO fullness,
2581  we correct audio sync only if larger than this threshold */
2582  is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / av_samples_get_buffer_size(NULL, is->audio_tgt.channels, is->audio_tgt.freq, is->audio_tgt.fmt, 1);
2583 
2584  memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2585  memset(&is->audio_pkt_temp, 0, sizeof(is->audio_pkt_temp));
2586  is->audio_pkt_temp.stream_index = -1;
2587 
2588  is->audio_stream = stream_index;
2589  is->audio_st = ic->streams[stream_index];
2590 
2591  packet_queue_start(&is->audioq);
2592  SDL_PauseAudio(0);
2593  break;
2594  case AVMEDIA_TYPE_VIDEO:
2595  is->video_stream = stream_index;
2596  is->video_st = ic->streams[stream_index];
2597 
2598  packet_queue_start(&is->videoq);
2599  is->video_tid = SDL_CreateThread(video_thread, is);
2600  is->queue_attachments_req = 1;
2601  break;
2602  case AVMEDIA_TYPE_SUBTITLE:
2603  is->subtitle_stream = stream_index;
2604  is->subtitle_st = ic->streams[stream_index];
2605  packet_queue_start(&is->subtitleq);
2606 
2607  is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2608  break;
2609  default:
2610  break;
2611  }
2612  return 0;
2613 }
2614 
2615 static void stream_component_close(VideoState *is, int stream_index)
2616 {
2617  AVFormatContext *ic = is->ic;
2618  AVCodecContext *avctx;
2619 
2620  if (stream_index < 0 || stream_index >= ic->nb_streams)
2621  return;
2622  avctx = ic->streams[stream_index]->codec;
2623 
2624  switch (avctx->codec_type) {
2625  case AVMEDIA_TYPE_AUDIO:
2626  packet_queue_abort(&is->audioq);
2627 
2628  SDL_CloseAudio();
2629 
2630  packet_queue_flush(&is->audioq);
2631  av_free_packet(&is->audio_pkt);
2632  swr_free(&is->swr_ctx);
2633  av_freep(&is->audio_buf1);
2634  is->audio_buf1_size = 0;
2635  is->audio_buf = NULL;
2636  av_frame_free(&is->frame);
2637 
2638  if (is->rdft) {
2639  av_rdft_end(is->rdft);
2640  av_freep(&is->rdft_data);
2641  is->rdft = NULL;
2642  is->rdft_bits = 0;
2643  }
2644 #if CONFIG_AVFILTER
2645  avfilter_graph_free(&is->agraph);
2646 #endif
2647  break;
2648  case AVMEDIA_TYPE_VIDEO:
2649  packet_queue_abort(&is->videoq);
2650 
2651  /* note: we also signal this mutex to make sure we deblock the
2652  video thread in all cases */
2653  SDL_LockMutex(is->pictq_mutex);
2654  SDL_CondSignal(is->pictq_cond);
2655  SDL_UnlockMutex(is->pictq_mutex);
2656 
2657  SDL_WaitThread(is->video_tid, NULL);
2658 
2659  packet_queue_flush(&is->videoq);
2660  break;
2661  case AVMEDIA_TYPE_SUBTITLE:
2662  packet_queue_abort(&is->subtitleq);
2663 
2664  /* note: we also signal this mutex to make sure we deblock the
2665  video thread in all cases */
2666  SDL_LockMutex(is->subpq_mutex);
2667  SDL_CondSignal(is->subpq_cond);
2668  SDL_UnlockMutex(is->subpq_mutex);
2669 
2670  SDL_WaitThread(is->subtitle_tid, NULL);
2671 
2672  packet_queue_flush(&is->subtitleq);
2673  break;
2674  default:
2675  break;
2676  }
2677 
2678  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2679  avcodec_close(avctx);
2680  switch (avctx->codec_type) {
2681  case AVMEDIA_TYPE_AUDIO:
2682  is->audio_st = NULL;
2683  is->audio_stream = -1;
2684  break;
2685  case AVMEDIA_TYPE_VIDEO:
2686  is->video_st = NULL;
2687  is->video_stream = -1;
2688  break;
2689  case AVMEDIA_TYPE_SUBTITLE:
2690  is->subtitle_st = NULL;
2691  is->subtitle_stream = -1;
2692  break;
2693  default:
2694  break;
2695  }
2696 }
2697 
2698 static int decode_interrupt_cb(void *ctx)
2699 {
2700  VideoState *is = ctx;
2701  return is->abort_request;
2702 }
2703 
2704 static int is_realtime(AVFormatContext *s)
2705 {
2706  if( !strcmp(s->iformat->name, "rtp")
2707  || !strcmp(s->iformat->name, "rtsp")
2708  || !strcmp(s->iformat->name, "sdp")
2709  )
2710  return 1;
2711 
2712  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2713  || !strncmp(s->filename, "udp:", 4)
2714  )
2715  )
2716  return 1;
2717  return 0;
2718 }
2719 
2720 /* this thread gets the stream from the disk or the network */
2721 static int read_thread(void *arg)
2722 {
2723  VideoState *is = arg;
2724  AVFormatContext *ic = NULL;
2725  int err, i, ret;
2726  int st_index[AVMEDIA_TYPE_NB];
2727  AVPacket pkt1, *pkt = &pkt1;
2728  int eof = 0;
2729  int64_t stream_start_time;
2730  int pkt_in_play_range = 0;
2731  AVDictionaryEntry *t;
2732  AVDictionary **opts;
2733  int orig_nb_streams;
2734  SDL_mutex *wait_mutex = SDL_CreateMutex();
2735 
2736  memset(st_index, -1, sizeof(st_index));
2737  is->last_video_stream = is->video_stream = -1;
2738  is->last_audio_stream = is->audio_stream = -1;
2739  is->last_subtitle_stream = is->subtitle_stream = -1;
2740 
2741  ic = avformat_alloc_context();
2742  ic->interrupt_callback.callback = decode_interrupt_cb;
2743  ic->interrupt_callback.opaque = is;
2744  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2745  if (err < 0) {
2746  print_error(is->filename, err);
2747  ret = -1;
2748  goto fail;
2749  }
2750  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2751  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2752  ret = AVERROR_OPTION_NOT_FOUND;
2753  goto fail;
2754  }
2755  is->ic = ic;
2756 
2757  if (genpts)
2758  ic->flags |= AVFMT_FLAG_GENPTS;
2759 
2760  opts = setup_find_stream_info_opts(ic, codec_opts);
2761  orig_nb_streams = ic->nb_streams;
2762 
2763  err = avformat_find_stream_info(ic, opts);
2764  if (err < 0) {
2765  av_log(NULL, AV_LOG_WARNING,
2766  "%s: could not find codec parameters\n", is->filename);
2767  ret = -1;
2768  goto fail;
2769  }
2770  for (i = 0; i < orig_nb_streams; i++)
2771  av_dict_free(&opts[i]);
2772  av_freep(&opts);
2773 
2774  if (ic->pb)
2775  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end
2776 
2777  if (seek_by_bytes < 0)
2778  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
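 /* default to seeking by bytes only for formats with discontinuous
    timestamps, with Ogg explicitly excluded */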
2779 
2780  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2781 
2782  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2783  window_title = av_asprintf("%s - %s", t->value, input_filename);
2784 
2785  /* if seeking requested, we execute it */
2786  if (start_time != AV_NOPTS_VALUE) {
2787  int64_t timestamp;
2788 
2789  timestamp = start_time;
2790  /* add the stream start time */
2791  if (ic->start_time != AV_NOPTS_VALUE)
2792  timestamp += ic->start_time;
2793  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2794  if (ret < 0) {
2795  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2796  is->filename, (double)timestamp / AV_TIME_BASE);
2797  }
2798  }
2799 
2800  is->realtime = is_realtime(ic);
2801 
2802  for (i = 0; i < ic->nb_streams; i++)
2803  ic->streams[i]->discard = AVDISCARD_ALL;
2804  if (!video_disable)
2805  st_index[AVMEDIA_TYPE_VIDEO] =
2806  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2807  wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2808  if (!audio_disable)
2809  st_index[AVMEDIA_TYPE_AUDIO] =
2810  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2811  wanted_stream[AVMEDIA_TYPE_AUDIO],
2812  st_index[AVMEDIA_TYPE_VIDEO],
2813  NULL, 0);
2814  if (!video_disable && !subtitle_disable)
2815  st_index[AVMEDIA_TYPE_SUBTITLE] =
2816  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2817  wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2818  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2819  st_index[AVMEDIA_TYPE_AUDIO] :
2820  st_index[AVMEDIA_TYPE_VIDEO]),
2821  NULL, 0);
2822  if (show_status) {
2823  av_dump_format(ic, 0, is->filename, 0);
2824  }
2825 
2826  is->show_mode = show_mode;
2827 
2828  /* open the streams */
2829  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2830  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2831  }
2832 
2833  ret = -1;
2834  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2835  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2836  }
2837  if (is->show_mode == SHOW_MODE_NONE)
2838  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2839 
2840  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2841  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2842  }
2843 
2844  if (is->video_stream < 0 && is->audio_stream < 0) {
2845  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2846  is->filename);
2847  ret = -1;
2848  goto fail;
2849  }
2850 
2851  if (infinite_buffer < 0 && is->realtime)
2852  infinite_buffer = 1;
2853 
2854  for (;;) {
2855  if (is->abort_request)
2856  break;
2857  if (is->paused != is->last_paused) {
2858  is->last_paused = is->paused;
2859  if (is->paused)
2860  is->read_pause_return = av_read_pause(ic);
2861  else
2862  av_read_play(ic);
2863  }
2864 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2865  if (is->paused &&
2866  (!strcmp(ic->iformat->name, "rtsp") ||
2867  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2868  /* wait 10 ms to avoid trying to get another packet */
2869  /* XXX: horrible */
2870  SDL_Delay(10);
2871  continue;
2872  }
2873 #endif
2874  if (is->seek_req) {
2875  int64_t seek_target = is->seek_pos;
2876  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2877  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2878 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2879 // of the seek_pos/seek_rel variables
2880 
2881  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2882  if (ret < 0) {
2883  av_log(NULL, AV_LOG_ERROR,
2884  "%s: error while seeking\n", is->ic->filename);
2885  } else {
2886  if (is->audio_stream >= 0) {
2887  packet_queue_flush(&is->audioq);
2888  packet_queue_put(&is->audioq, &flush_pkt);
2889  }
2890  if (is->subtitle_stream >= 0) {
2891  packet_queue_flush(&is->subtitleq);
2892  packet_queue_put(&is->subtitleq, &flush_pkt);
2893  }
2894  if (is->video_stream >= 0) {
2895  packet_queue_flush(&is->videoq);
2896  packet_queue_put(&is->videoq, &flush_pkt);
2897  }
2898  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2899  set_clock(&is->extclk, NAN, 0);
2900  } else {
2901  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2902  }
2903  }
2904  is->seek_req = 0;
2905  is->queue_attachments_req = 1;
2906  eof = 0;
2907  if (is->paused)
2908  step_to_next_frame(is);
2909  }
2910  if (is->queue_attachments_req) {
2911  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2912  AVPacket copy;
2913  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2914  goto fail;
2915  packet_queue_put(&is->videoq, &copy);
2917  }
2918  is->queue_attachments_req = 0;
2919  }
2920 
2921  /* if the queue are full, no need to read more */
2922  if (infinite_buffer<1 &&
2923  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2924  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
2925  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
2926  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2927  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
2928  /* wait 10 ms */
2929  SDL_LockMutex(wait_mutex);
2930  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2931  SDL_UnlockMutex(wait_mutex);
2932  continue;
2933  }
2934  if (!is->paused &&
2935  (!is->audio_st || is->audio_finished == is->audioq.serial) &&
2936  (!is->video_st || (is->video_finished == is->videoq.serial && is->pictq_size == 0))) {
2937  if (loop != 1 && (!loop || --loop)) {
2938  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2939  } else if (autoexit) {
2940  ret = AVERROR_EOF;
2941  goto fail;
2942  }
2943  }
2944  if (eof) {
2945  if (is->video_stream >= 0)
2946  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2947  if (is->audio_stream >= 0)
2948  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
2949  SDL_Delay(10);
2950  eof=0;
2951  continue;
2952  }
2953  ret = av_read_frame(ic, pkt);
2954  if (ret < 0) {
2955  if (ret == AVERROR_EOF || url_feof(ic->pb))
2956  eof = 1;
2957  if (ic->pb && ic->pb->error)
2958  break;
2959  SDL_LockMutex(wait_mutex);
2960  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2961  SDL_UnlockMutex(wait_mutex);
2962  continue;
2963  }
2964  /* check if packet is in play range specified by user, then queue, otherwise discard */
2965  stream_start_time = ic->streams[pkt->stream_index]->start_time;
2966  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2967  (pkt->pts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
2968  av_q2d(ic->streams[pkt->stream_index]->time_base) -
2969  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2970  <= ((double)duration / 1000000);
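 /* the packet pts, rescaled to seconds and made relative to the stream and
    -ss start offsets, must not exceed the -t duration */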
2971  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2972  packet_queue_put(&is->audioq, pkt);
2973  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
2974  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
2975  packet_queue_put(&is->videoq, pkt);
2976  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2977  packet_queue_put(&is->subtitleq, pkt);
2978  } else {
2979  av_free_packet(pkt);
2980  }
2981  }
2982  /* wait until the end */
2983  while (!is->abort_request) {
2984  SDL_Delay(100);
2985  }
2986 
2987  ret = 0;
2988  fail:
2989  /* close each stream */
2990  if (is->audio_stream >= 0)
2991  stream_component_close(is, is->audio_stream);
2992  if (is->video_stream >= 0)
2993  stream_component_close(is, is->video_stream);
2994  if (is->subtitle_stream >= 0)
2995  stream_component_close(is, is->subtitle_stream);
2996  if (is->ic) {
2997  avformat_close_input(&is->ic);
2998  }
2999 
3000  if (ret != 0) {
3001  SDL_Event event;
3002 
3003  event.type = FF_QUIT_EVENT;
3004  event.user.data1 = is;
3005  SDL_PushEvent(&event);
3006  }
3007  SDL_DestroyMutex(wait_mutex);
3008  return 0;
3009 }
3010 
3011 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3012 {
3013  VideoState *is;
3014 
3015  is = av_mallocz(sizeof(VideoState));
3016  if (!is)
3017  return NULL;
3018  av_strlcpy(is->filename, filename, sizeof(is->filename));
3019  is->iformat = iformat;
3020  is->ytop = 0;
3021  is->xleft = 0;
3022 
3023  /* start video display */
3024  is->pictq_mutex = SDL_CreateMutex();
3025  is->pictq_cond = SDL_CreateCond();
3026 
3027  is->subpq_mutex = SDL_CreateMutex();
3028  is->subpq_cond = SDL_CreateCond();
3029 
3030  packet_queue_init(&is->videoq);
3031  packet_queue_init(&is->audioq);
3032  packet_queue_init(&is->subtitleq);
3033 
3034  is->continue_read_thread = SDL_CreateCond();
3035 
3036  init_clock(&is->vidclk, &is->videoq.serial);
3037  init_clock(&is->audclk, &is->audioq.serial);
3038  init_clock(&is->extclk, &is->extclk.serial);
3039  is->audio_clock_serial = -1;
3040  is->audio_last_serial = -1;
3041  is->av_sync_type = av_sync_type;
3042  is->read_tid = SDL_CreateThread(read_thread, is);
3043  if (!is->read_tid) {
3044  av_free(is);
3045  return NULL;
3046  }
3047  return is;
3048 }
3049 
3050 static void stream_cycle_channel(VideoState *is, int codec_type)
3051 {
3052  AVFormatContext *ic = is->ic;
3053  int start_index, stream_index;
3054  int old_index;
3055  AVStream *st;
3056  AVProgram *p = NULL;
3057  int nb_streams = is->ic->nb_streams;
3058 
3059  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3060  start_index = is->last_video_stream;
3061  old_index = is->video_stream;
3062  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3063  start_index = is->last_audio_stream;
3064  old_index = is->audio_stream;
3065  } else {
3066  start_index = is->last_subtitle_stream;
3067  old_index = is->subtitle_stream;
3068  }
3069  stream_index = start_index;
3070 
3071  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3072  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3073  if (p) {
3074  nb_streams = p->nb_stream_indexes;
3075  for (start_index = 0; start_index < nb_streams; start_index++)
3076  if (p->stream_index[start_index] == stream_index)
3077  break;
3078  if (start_index == nb_streams)
3079  start_index = -1;
3080  stream_index = start_index;
3081  }
3082  }
3083 
3084  for (;;) {
3085  if (++stream_index >= nb_streams)
3086  {
3087  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3088  {
3089  stream_index = -1;
3090  is->last_subtitle_stream = -1;
3091  goto the_end;
3092  }
3093  if (start_index == -1)
3094  return;
3095  stream_index = 0;
3096  }
3097  if (stream_index == start_index)
3098  return;
3099  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3100  if (st->codec->codec_type == codec_type) {
3101  /* check that parameters are OK */
3102  switch (codec_type) {
3103  case AVMEDIA_TYPE_AUDIO:
3104  if (st->codec->sample_rate != 0 &&
3105  st->codec->channels != 0)
3106  goto the_end;
3107  break;
3108  case AVMEDIA_TYPE_VIDEO:
3109  case AVMEDIA_TYPE_SUBTITLE:
3110  goto the_end;
3111  default:
3112  break;
3113  }
3114  }
3115  }
3116  the_end:
3117  if (p && stream_index != -1)
3118  stream_index = p->stream_index[stream_index];
3119  stream_component_close(is, old_index);
3120  stream_component_open(is, stream_index);
3121 }
3122 
3123 
3124 static void toggle_full_screen(VideoState *is)
3125 {
3126 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3127  /* OS X needs to reallocate the SDL overlays */
3128  int i;
3129  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3130  is->pictq[i].reallocate = 1;
3131 #endif
3132  is_full_screen = !is_full_screen;
3133  video_open(is, 1, NULL);
3134 }
3135 
3136 static void toggle_audio_display(VideoState *is)
3137 {
3138  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3139  int next = is->show_mode;
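 /* pick the next display mode, skipping video when no video stream is
    available and the audio visualizations when no audio stream is */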
3140  do {
3141  next = (next + 1) % SHOW_MODE_NB;
3142  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3143  if (is->show_mode != next) {
3144  fill_rectangle(screen,
3145  is->xleft, is->ytop, is->width, is->height,
3146  bgcolor, 1);
3147  is->force_refresh = 1;
3148  is->show_mode = next;
3149  }
3150 }
3151 
3152 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3153  double remaining_time = 0.0;
3154  SDL_PumpEvents();
3155  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3156  if (!cursor_hidden && av_gettime() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3157  SDL_ShowCursor(0);
3158  cursor_hidden = 1;
3159  }
3160  if (remaining_time > 0.0)
3161  av_usleep((int64_t)(remaining_time * 1000000.0));
3162  remaining_time = REFRESH_RATE;
3163  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3164  video_refresh(is, &remaining_time);
3165  SDL_PumpEvents();
3166  }
3167 }
3168 
3169 /* handle an event sent by the GUI */
3170 static void event_loop(VideoState *cur_stream)
3171 {
3172  SDL_Event event;
3173  double incr, pos, frac;
3174 
3175  for (;;) {
3176  double x;
3177  refresh_loop_wait_event(cur_stream, &event);
3178  switch (event.type) {
3179  case SDL_KEYDOWN:
3180  if (exit_on_keydown) {
3181  do_exit(cur_stream);
3182  break;
3183  }
3184  switch (event.key.keysym.sym) {
3185  case SDLK_ESCAPE:
3186  case SDLK_q:
3187  do_exit(cur_stream);
3188  break;
3189  case SDLK_f:
3190  toggle_full_screen(cur_stream);
3191  cur_stream->force_refresh = 1;
3192  break;
3193  case SDLK_p:
3194  case SDLK_SPACE:
3195  toggle_pause(cur_stream);
3196  break;
3197  case SDLK_s: // S: Step to next frame
3198  step_to_next_frame(cur_stream);
3199  break;
3200  case SDLK_a:
3201  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3202  break;
3203  case SDLK_v:
3204  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3205  break;
3206  case SDLK_c:
3207  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3208  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3209  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3210  break;
3211  case SDLK_t:
3212  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3213  break;
3214  case SDLK_w:
3215  toggle_audio_display(cur_stream);
3216  break;
3217  case SDLK_PAGEUP:
3218  incr = 600.0;
3219  goto do_seek;
3220  case SDLK_PAGEDOWN:
3221  incr = -600.0;
3222  goto do_seek;
3223  case SDLK_LEFT:
3224  incr = -10.0;
3225  goto do_seek;
3226  case SDLK_RIGHT:
3227  incr = 10.0;
3228  goto do_seek;
3229  case SDLK_UP:
3230  incr = 60.0;
3231  goto do_seek;
3232  case SDLK_DOWN:
3233  incr = -60.0;
3234  do_seek:
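 /* byte mode scales the increment from seconds using the container bit rate
    (or a rough default when unknown); time mode clamps the target to the
    stream start time */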
3235  if (seek_by_bytes) {
3236  if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
3237  pos = cur_stream->video_current_pos;
3238  } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
3239  pos = cur_stream->audio_pkt.pos;
3240  } else
3241  pos = avio_tell(cur_stream->ic->pb);
3242  if (cur_stream->ic->bit_rate)
3243  incr *= cur_stream->ic->bit_rate / 8.0;
3244  else
3245  incr *= 180000.0;
3246  pos += incr;
3247  stream_seek(cur_stream, pos, incr, 1);
3248  } else {
3249  pos = get_master_clock(cur_stream);
3250  if (isnan(pos))
3251  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3252  pos += incr;
3253  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3254  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3255  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3256  }
3257  break;
3258  default:
3259  break;
3260  }
3261  break;
3262  case SDL_VIDEOEXPOSE:
3263  cur_stream->force_refresh = 1;
3264  break;
3265  case SDL_MOUSEBUTTONDOWN:
3266  if (exit_on_mousedown) {
3267  do_exit(cur_stream);
3268  break;
3269  }
3270  case SDL_MOUSEMOTION:
3271  if (cursor_hidden) {
3272  SDL_ShowCursor(1);
3273  cursor_hidden = 0;
3274  }
3275  cursor_last_shown = av_gettime();
3276  if (event.type == SDL_MOUSEBUTTONDOWN) {
3277  x = event.button.x;
3278  } else {
3279  if (event.motion.state != SDL_PRESSED)
3280  break;
3281  x = event.motion.x;
3282  }
3283  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3284  uint64_t size = avio_size(cur_stream->ic->pb);
3285  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3286  } else {
3287  int64_t ts;
3288  int ns, hh, mm, ss;
3289  int tns, thh, tmm, tss;
3290  tns = cur_stream->ic->duration / 1000000LL;
3291  thh = tns / 3600;
3292  tmm = (tns % 3600) / 60;
3293  tss = (tns % 60);
3294  frac = x / cur_stream->width;
3295  ns = frac * tns;
3296  hh = ns / 3600;
3297  mm = (ns % 3600) / 60;
3298  ss = (ns % 60);
3299  av_log(NULL, AV_LOG_INFO,
3300  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3301  hh, mm, ss, thh, tmm, tss);
3302  ts = frac * cur_stream->ic->duration;
3303  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3304  ts += cur_stream->ic->start_time;
3305  stream_seek(cur_stream, ts, 0, 0);
3306  }
3307  break;
3308  case SDL_VIDEORESIZE:
3309  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3310  SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
3311  if (!screen) {
3312  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3313  do_exit(cur_stream);
3314  }
3315  screen_width = cur_stream->width = screen->w;
3316  screen_height = cur_stream->height = screen->h;
3317  cur_stream->force_refresh = 1;
3318  break;
3319  case SDL_QUIT:
3320  case FF_QUIT_EVENT:
3321  do_exit(cur_stream);
3322  break;
3323  case FF_ALLOC_EVENT:
3324  alloc_picture(event.user.data1);
3325  break;
3326  default:
3327  break;
3328  }
3329  }
3330 }
3331 
3332 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3333 {
3334  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3335  return opt_default(NULL, "video_size", arg);
3336 }
3337 
3338 static int opt_width(void *optctx, const char *opt, const char *arg)
3339 {
3340  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3341  return 0;
3342 }
3343 
3344 static int opt_height(void *optctx, const char *opt, const char *arg)
3345 {
3346  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3347  return 0;
3348 }
3349 
3350 static int opt_format(void *optctx, const char *opt, const char *arg)
3351 {
3352  file_iformat = av_find_input_format(arg);
3353  if (!file_iformat) {
3354  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3355  return AVERROR(EINVAL);
3356  }
3357  return 0;
3358 }
3359 
3360 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3361 {
3362  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3363  return opt_default(NULL, "pixel_format", arg);
3364 }
3365 
3366 static int opt_sync(void *optctx, const char *opt, const char *arg)
3367 {
3368  if (!strcmp(arg, "audio"))
3369  av_sync_type = AV_SYNC_AUDIO_MASTER;
3370  else if (!strcmp(arg, "video"))
3371  av_sync_type = AV_SYNC_VIDEO_MASTER;
3372  else if (!strcmp(arg, "ext"))
3373  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3374  else {
3375  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3376  exit(1);
3377  }
3378  return 0;
3379 }
3380 
3381 static int opt_seek(void *optctx, const char *opt, const char *arg)
3382 {
3383  start_time = parse_time_or_die(opt, arg, 1);
3384  return 0;
3385 }
3386 
3387 static int opt_duration(void *optctx, const char *opt, const char *arg)
3388 {
3389  duration = parse_time_or_die(opt, arg, 1);
3390  return 0;
3391 }
3392 
3393 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3394 {
3395  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3396  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3397  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3398  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3399  return 0;
3400 }
3401 
3402 static void opt_input_file(void *optctx, const char *filename)
3403 {
3404  if (input_filename) {
3405  av_log(NULL, AV_LOG_FATAL,
3406  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3407  filename, input_filename);
3408  exit(1);
3409  }
3410  if (!strcmp(filename, "-"))
3411  filename = "pipe:";
3412  input_filename = filename;
3413 }
3414 
3415 static int opt_codec(void *optctx, const char *opt, const char *arg)
3416 {
3417  const char *spec = strchr(opt, ':');
3418  if (!spec) {
3419  av_log(NULL, AV_LOG_ERROR,
3420  "No media specifier was specified in '%s' in option '%s'\n",
3421  arg, opt);
3422  return AVERROR(EINVAL);
3423  }
3424  spec++;
3425  switch (spec[0]) {
3426  case 'a' : audio_codec_name = arg; break;
3427  case 's' : subtitle_codec_name = arg; break;
3428  case 'v' : video_codec_name = arg; break;
3429  default:
3430  av_log(NULL, AV_LOG_ERROR,
3431  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3432  return AVERROR(EINVAL);
3433  }
3434  return 0;
3435 }
3436 
3437 static int dummy;
3438 
3439 static const OptionDef options[] = {
3440 #include "cmdutils_common_opts.h"
3441  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3442  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3443  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3444  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3445  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3446  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3447  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3448  { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
3449  { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
3450  { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
3451  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3452  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3453  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3454  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3455  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3456  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3457  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3458  { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
3459  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3460  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3461  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3462  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3463  { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options", "bit_mask" },
3464  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3465  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3466  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3467  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3468  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3469  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3470  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3471  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3472 #if CONFIG_AVFILTER
3473  { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "set video filters", "filter_graph" },
3474  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3475 #endif
3476  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3477  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3478  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3479  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3480  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3481  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3482  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3483  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3484  { NULL, },
3485 };
3486 
3487 static void show_usage(void)
3488 {
3489  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3490  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3491  av_log(NULL, AV_LOG_INFO, "\n");
3492 }
3493 
3494 void show_help_default(const char *opt, const char *arg)
3495 {
3496  av_log_set_callback(log_callback_help);
3497  show_usage();
3498  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3499  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3500  printf("\n");
3501  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3502  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3503 #if !CONFIG_AVFILTER
3504  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3505 #else
3506  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3507 #endif
3508  printf("\nWhile playing:\n"
3509  "q, ESC quit\n"
3510  "f toggle full screen\n"
3511  "p, SPC pause\n"
3512  "a cycle audio channel in the current program\n"
3513  "v cycle video channel\n"
3514  "t cycle subtitle channel in the current program\n"
3515  "c cycle program\n"
3516  "w show audio waves\n"
3517  "s activate frame-step mode\n"
3518  "left/right seek backward/forward 10 seconds\n"
3519  "down/up seek backward/forward 1 minute\n"
3520  "page down/page up seek backward/forward 10 minutes\n"
3521  "mouse click seek to percentage in file corresponding to fraction of width\n"
3522  );
3523 }
3524 
3525 static int lockmgr(void **mtx, enum AVLockOp op)
3526 {
3527  switch(op) {
3528  case AV_LOCK_CREATE:
3529  *mtx = SDL_CreateMutex();
3530  if(!*mtx)
3531  return 1;
3532  return 0;
3533  case AV_LOCK_OBTAIN:
3534  return !!SDL_LockMutex(*mtx);
3535  case AV_LOCK_RELEASE:
3536  return !!SDL_UnlockMutex(*mtx);
3537  case AV_LOCK_DESTROY:
3538  SDL_DestroyMutex(*mtx);
3539  return 0;
3540  }
3541  return 1;
3542 }
3543 
3544 /* Called from the main */
3545 int main(int argc, char **argv)
3546 {
3547  int flags;
3548  VideoState *is;
3549  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3550 
3551  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3552  parse_loglevel(argc, argv, options);
3553 
3554  /* register all codecs, demux and protocols */
3555  avcodec_register_all();
3556 #if CONFIG_AVDEVICE
3557  avdevice_register_all();
3558 #endif
3559 #if CONFIG_AVFILTER
3560  avfilter_register_all();
3561 #endif
3562  av_register_all();
3563  avformat_network_init();
3564 
3565  init_opts();
3566 
3567  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3568  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3569 
3570  show_banner(argc, argv, options);
3571 
3572  parse_options(NULL, argc, argv, options, opt_input_file);
3573 
3574  if (!input_filename) {
3575  show_usage();
3576  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3577  av_log(NULL, AV_LOG_FATAL,
3578  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3579  exit(1);
3580  }
3581 
3582  if (display_disable) {
3583  video_disable = 1;
3584  }
3585  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3586  if (audio_disable)
3587  flags &= ~SDL_INIT_AUDIO;
3588  if (display_disable)
3589  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3590 #if !defined(__MINGW32__) && !defined(__APPLE__)
3591  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3592 #endif
3593  if (SDL_Init (flags)) {
3594  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3595  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3596  exit(1);
3597  }
3598 
3599  if (!display_disable) {
3600  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3601  fs_screen_width = vi->current_w;
3602  fs_screen_height = vi->current_h;
3603  }
3604 
3605  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3606  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3607  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3608 
3609  if (av_lockmgr_register(lockmgr)) {
3610  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3611  do_exit(NULL);
3612  }
3613 
3614  av_init_packet(&flush_pkt);
3615  flush_pkt.data = (uint8_t *)&flush_pkt;
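 /* the flush packet is recognized throughout the code by its data pointer
    pointing at the packet itself */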
3616 
3617  is = stream_open(input_filename, file_iformat);
3618  if (!is) {
3619  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3620  do_exit(NULL);
3621  }
3622 
3623  event_loop(is);
3624 
3625  /* never returns */
3626 
3627  return 0;
3628 }