FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
74 
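/*
 * Rough arithmetic behind these two constants (illustrative sketch, not code
 * from this file): capping callbacks at SDL_AUDIO_MAX_CALLBACKS_PER_SEC (30)
 * means each callback must deliver at least freq / 30 samples per channel.
 * At 48 kHz that is 1600 samples, so the SDL buffer ends up around the next
 * power of two (2048 samples), with SDL_AUDIO_MIN_BUFFER_SIZE (512) acting as
 * the floor for very low sample rates.
 */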
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
80 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
82 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83 
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
94 
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97 
99 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 
102 #define CURSOR_HIDE_DELAY 1000000
103 
104 static int64_t sws_flags = SWS_BICUBIC;
105 
106 typedef struct MyAVPacketList {
107  AVPacket pkt;
108  struct MyAVPacketList *next;
109  int serial;
110 } MyAVPacketList;
111 
112 typedef struct PacketQueue {
113  MyAVPacketList *first_pkt, *last_pkt;
114  int nb_packets;
115  int size;
116  int abort_request;
117  int serial;
118  SDL_mutex *mutex;
119  SDL_cond *cond;
120 } PacketQueue;
121 
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 #define SAMPLE_QUEUE_SIZE 9
125 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
126 
127 typedef struct AudioParams {
128  int freq;
129  int channels;
130  int64_t channel_layout;
131  enum AVSampleFormat fmt;
132  int frame_size;
133  int bytes_per_sec;
134 } AudioParams;
135 
136 typedef struct Clock {
137  double pts; /* clock base */
138  double pts_drift; /* clock base minus time at which we updated the clock */
139  double last_updated;
140  double speed;
141  int serial; /* clock is based on a packet with this serial */
142  int paused;
143  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
144 } Clock;
145 
146 /* Common struct for handling all types of decoded data and allocated render buffers. */
147 typedef struct Frame {
148  AVFrame *frame;
149  AVSubtitle sub;
150  int serial;
151  double pts; /* presentation timestamp for the frame */
152  double duration; /* estimated duration of the frame */
153  int64_t pos; /* byte position of the frame in the input file */
154  SDL_Overlay *bmp;
155  int allocated;
156  int reallocate;
157  int width;
158  int height;
159  AVRational sar;
160 } Frame;
161 
162 typedef struct FrameQueue {
163  Frame queue[FRAME_QUEUE_SIZE];
164  int rindex;
165  int windex;
166  int size;
167  int max_size;
168  int keep_last;
169  int rindex_shown;
170  SDL_mutex *mutex;
171  SDL_cond *cond;
172  PacketQueue *pktq;
173 } FrameQueue;
174 
175 enum {
176  AV_SYNC_AUDIO_MASTER, /* default choice */
177  AV_SYNC_VIDEO_MASTER,
178  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
179 };
180 
181 typedef struct Decoder {
182  AVPacket pkt;
183  AVPacket pkt_temp;
184  PacketQueue *queue;
185  AVCodecContext *avctx;
186  int pkt_serial;
187  int finished;
188  int packet_pending;
189  SDL_cond *empty_queue_cond;
190  int64_t start_pts;
191  AVRational start_pts_tb;
192  int64_t next_pts;
193  AVRational next_pts_tb;
194  SDL_Thread *decoder_tid;
195 } Decoder;
196 
197 typedef struct VideoState {
198  SDL_Thread *read_tid;
202  int paused;
205  int seek_req;
207  int64_t seek_pos;
208  int64_t seek_rel;
211  int realtime;
212 
216 
220 
224 
226 
228 
229  double audio_clock;
231  double audio_diff_cum; /* used for AV difference average computation */
241  unsigned int audio_buf_size; /* in bytes */
242  unsigned int audio_buf1_size;
243  int audio_buf_index; /* in bytes */
246 #if CONFIG_AVFILTER
247  struct AudioParams audio_filter_src;
248 #endif
253 
254  enum ShowMode {
255  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
256  } show_mode;
263  int xpos;
265 
269 
270  double frame_timer;
276  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
277 #if !CONFIG_AVFILTER
279 #endif
281  int eof;
282 
283  char filename[1024];
285  int step;
286 
287 #if CONFIG_AVFILTER
288  int vfilter_idx;
289  AVFilterContext *in_video_filter; // the first filter in the video chain
290  AVFilterContext *out_video_filter; // the last filter in the video chain
291  AVFilterContext *in_audio_filter; // the first filter in the audio chain
292  AVFilterContext *out_audio_filter; // the last filter in the audio chain
293  AVFilterGraph *agraph; // audio filter graph
294 #endif
295 
297 
299 } VideoState;
300 
301 /* options specified by the user */
303 static const char *input_filename;
304 static const char *window_title;
305 static int fs_screen_width;
306 static int fs_screen_height;
307 static int default_width = 640;
308 static int default_height = 480;
309 static int screen_width = 0;
310 static int screen_height = 0;
311 static int audio_disable;
312 static int video_disable;
313 static int subtitle_disable;
314 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
315 static int seek_by_bytes = -1;
316 static int display_disable;
317 static int show_status = 1;
319 static int64_t start_time = AV_NOPTS_VALUE;
320 static int64_t duration = AV_NOPTS_VALUE;
321 static int fast = 0;
322 static int genpts = 0;
323 static int lowres = 0;
324 static int decoder_reorder_pts = -1;
325 static int autoexit;
326 static int exit_on_keydown;
327 static int exit_on_mousedown;
328 static int loop = 1;
329 static int framedrop = -1;
330 static int infinite_buffer = -1;
331 static enum ShowMode show_mode = SHOW_MODE_NONE;
332 static const char *audio_codec_name;
333 static const char *subtitle_codec_name;
334 static const char *video_codec_name;
335 double rdftspeed = 0.02;
336 static int64_t cursor_last_shown;
337 static int cursor_hidden = 0;
338 #if CONFIG_AVFILTER
339 static const char **vfilters_list = NULL;
340 static int nb_vfilters = 0;
341 static char *afilters = NULL;
342 #endif
343 static int autorotate = 1;
344 
345 /* current context */
346 static int is_full_screen;
347 static int64_t audio_callback_time;
348 
350 
351 #define FF_ALLOC_EVENT (SDL_USEREVENT)
352 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
353 
354 static SDL_Surface *screen;
355 
356 #if CONFIG_AVFILTER
357 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
358 {
359  GROW_ARRAY(vfilters_list, nb_vfilters);
360  vfilters_list[nb_vfilters - 1] = arg;
361  return 0;
362 }
363 #endif
364 
365 static inline
366 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
367  enum AVSampleFormat fmt2, int64_t channel_count2)
368 {
369  /* If channel count == 1, planar and non-planar formats are the same */
370  if (channel_count1 == 1 && channel_count2 == 1)
371  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
372  else
373  return channel_count1 != channel_count2 || fmt1 != fmt2;
374 }
375 
376 static inline
377 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
378 {
379  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
380  return channel_layout;
381  else
382  return 0;
383 }
384 
385 static void free_picture(Frame *vp);
386 
387 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
388 {
389  MyAVPacketList *pkt1;
390 
391  if (q->abort_request)
392  return -1;
393 
394  pkt1 = av_malloc(sizeof(MyAVPacketList));
395  if (!pkt1)
396  return -1;
397  pkt1->pkt = *pkt;
398  pkt1->next = NULL;
399  if (pkt == &flush_pkt)
400  q->serial++;
401  pkt1->serial = q->serial;
402 
403  if (!q->last_pkt)
404  q->first_pkt = pkt1;
405  else
406  q->last_pkt->next = pkt1;
407  q->last_pkt = pkt1;
408  q->nb_packets++;
409  q->size += pkt1->pkt.size + sizeof(*pkt1);
410  /* XXX: should duplicate packet data in DV case */
411  SDL_CondSignal(q->cond);
412  return 0;
413 }
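/*
 * Sketch of the serial mechanism used above (explanatory note, not original
 * code): pushing the special flush_pkt bumps q->serial, and every queued
 * packet carries the serial that was current when it was added. Consumers
 * (see decoder_decode_frame below) compare a packet's serial against the
 * queue's current serial and drop anything queued before the last flush,
 * e.g. packets that predate a seek.
 */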
414 
415 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
416 {
417  int ret;
418 
419  /* duplicate the packet */
420  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
421  return -1;
422 
423  SDL_LockMutex(q->mutex);
424  ret = packet_queue_put_private(q, pkt);
425  SDL_UnlockMutex(q->mutex);
426 
427  if (pkt != &flush_pkt && ret < 0)
428  av_free_packet(pkt);
429 
430  return ret;
431 }
432 
433 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
434 {
435  AVPacket pkt1, *pkt = &pkt1;
436  av_init_packet(pkt);
437  pkt->data = NULL;
438  pkt->size = 0;
439  pkt->stream_index = stream_index;
440  return packet_queue_put(q, pkt);
441 }
442 
443 /* packet queue handling */
444 static void packet_queue_init(PacketQueue *q)
445 {
446  memset(q, 0, sizeof(PacketQueue));
447  q->mutex = SDL_CreateMutex();
448  q->cond = SDL_CreateCond();
449  q->abort_request = 1;
450 }
451 
452 static void packet_queue_flush(PacketQueue *q)
453 {
454  MyAVPacketList *pkt, *pkt1;
455 
456  SDL_LockMutex(q->mutex);
457  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
458  pkt1 = pkt->next;
459  av_free_packet(&pkt->pkt);
460  av_freep(&pkt);
461  }
462  q->last_pkt = NULL;
463  q->first_pkt = NULL;
464  q->nb_packets = 0;
465  q->size = 0;
466  SDL_UnlockMutex(q->mutex);
467 }
468 
469 static void packet_queue_destroy(PacketQueue *q)
470 {
471  packet_queue_flush(q);
472  SDL_DestroyMutex(q->mutex);
473  SDL_DestroyCond(q->cond);
474 }
475 
476 static void packet_queue_abort(PacketQueue *q)
477 {
478  SDL_LockMutex(q->mutex);
479 
480  q->abort_request = 1;
481 
482  SDL_CondSignal(q->cond);
483 
484  SDL_UnlockMutex(q->mutex);
485 }
486 
487 static void packet_queue_start(PacketQueue *q)
488 {
489  SDL_LockMutex(q->mutex);
490  q->abort_request = 0;
491  packet_queue_put_private(q, &flush_pkt);
492  SDL_UnlockMutex(q->mutex);
493 }
494 
495 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
496 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
497 {
498  MyAVPacketList *pkt1;
499  int ret;
500 
501  SDL_LockMutex(q->mutex);
502 
503  for (;;) {
504  if (q->abort_request) {
505  ret = -1;
506  break;
507  }
508 
509  pkt1 = q->first_pkt;
510  if (pkt1) {
511  q->first_pkt = pkt1->next;
512  if (!q->first_pkt)
513  q->last_pkt = NULL;
514  q->nb_packets--;
515  q->size -= pkt1->pkt.size + sizeof(*pkt1);
516  *pkt = pkt1->pkt;
517  if (serial)
518  *serial = pkt1->serial;
519  av_free(pkt1);
520  ret = 1;
521  break;
522  } else if (!block) {
523  ret = 0;
524  break;
525  } else {
526  SDL_CondWait(q->cond, q->mutex);
527  }
528  }
529  SDL_UnlockMutex(q->mutex);
530  return ret;
531 }
532 
533 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
534  memset(d, 0, sizeof(Decoder));
535  d->avctx = avctx;
536  d->queue = queue;
537  d->empty_queue_cond = empty_queue_cond;
538  d->start_pts = AV_NOPTS_VALUE;
539 }
540 
541 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
542  int got_frame = 0;
543 
544  do {
545  int ret = -1;
546 
547  if (d->queue->abort_request)
548  return -1;
549 
550  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
551  AVPacket pkt;
552  do {
553  if (d->queue->nb_packets == 0)
554  SDL_CondSignal(d->empty_queue_cond);
555  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
556  return -1;
557  if (pkt.data == flush_pkt.data) {
558  avcodec_flush_buffers(d->avctx);
559  d->finished = 0;
560  d->next_pts = d->start_pts;
561  d->next_pts_tb = d->start_pts_tb;
562  }
563  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
564  av_free_packet(&d->pkt);
565  d->pkt_temp = d->pkt = pkt;
566  d->packet_pending = 1;
567  }
568 
569  switch (d->avctx->codec_type) {
570  case AVMEDIA_TYPE_VIDEO:
571  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
572  if (got_frame) {
573  if (decoder_reorder_pts == -1) {
574  frame->pts = av_frame_get_best_effort_timestamp(frame);
575  } else if (decoder_reorder_pts) {
576  frame->pts = frame->pkt_pts;
577  } else {
578  frame->pts = frame->pkt_dts;
579  }
580  }
581  break;
582  case AVMEDIA_TYPE_AUDIO:
583  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
584  if (got_frame) {
585  AVRational tb = (AVRational){1, frame->sample_rate};
586  if (frame->pts != AV_NOPTS_VALUE)
587  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
588  else if (frame->pkt_pts != AV_NOPTS_VALUE)
589  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
590  else if (d->next_pts != AV_NOPTS_VALUE)
591  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
592  if (frame->pts != AV_NOPTS_VALUE) {
593  d->next_pts = frame->pts + frame->nb_samples;
594  d->next_pts_tb = tb;
595  }
596  }
597  break;
598  case AVMEDIA_TYPE_SUBTITLE:
599  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
600  break;
601  }
602 
603  if (ret < 0) {
604  d->packet_pending = 0;
605  } else {
606  d->pkt_temp.dts =
607  d->pkt_temp.pts = AV_NOPTS_VALUE;
608  if (d->pkt_temp.data) {
609  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
610  ret = d->pkt_temp.size;
611  d->pkt_temp.data += ret;
612  d->pkt_temp.size -= ret;
613  if (d->pkt_temp.size <= 0)
614  d->packet_pending = 0;
615  } else {
616  if (!got_frame) {
617  d->packet_pending = 0;
618  d->finished = d->pkt_serial;
619  }
620  }
621  }
622  } while (!got_frame && !d->finished);
623 
624  return got_frame;
625 }
626 
627 static void decoder_destroy(Decoder *d) {
628  av_free_packet(&d->pkt);
629 }
630 
631 static void frame_queue_unref_item(Frame *vp)
632 {
633  av_frame_unref(vp->frame);
634  avsubtitle_free(&vp->sub);
635 }
636 
637 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
638 {
639  int i;
640  memset(f, 0, sizeof(FrameQueue));
641  if (!(f->mutex = SDL_CreateMutex()))
642  return AVERROR(ENOMEM);
643  if (!(f->cond = SDL_CreateCond()))
644  return AVERROR(ENOMEM);
645  f->pktq = pktq;
646  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
647  f->keep_last = !!keep_last;
648  for (i = 0; i < f->max_size; i++)
649  if (!(f->queue[i].frame = av_frame_alloc()))
650  return AVERROR(ENOMEM);
651  return 0;
652 }
653 
654 static void frame_queue_destory(FrameQueue *f)
655 {
656  int i;
657  for (i = 0; i < f->max_size; i++) {
658  Frame *vp = &f->queue[i];
659  frame_queue_unref_item(vp);
660  av_frame_free(&vp->frame);
661  free_picture(vp);
662  }
663  SDL_DestroyMutex(f->mutex);
664  SDL_DestroyCond(f->cond);
665 }
666 
667 static void frame_queue_signal(FrameQueue *f)
668 {
669  SDL_LockMutex(f->mutex);
670  SDL_CondSignal(f->cond);
671  SDL_UnlockMutex(f->mutex);
672 }
673 
674 static Frame *frame_queue_peek(FrameQueue *f)
675 {
676  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
677 }
678 
679 static Frame *frame_queue_peek_next(FrameQueue *f)
680 {
681  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
682 }
683 
684 static Frame *frame_queue_peek_last(FrameQueue *f)
685 {
686  return &f->queue[f->rindex];
687 }
688 
689 static Frame *frame_queue_peek_writable(FrameQueue *f)
690 {
691  /* wait until we have space to put a new frame */
692  SDL_LockMutex(f->mutex);
693  while (f->size >= f->max_size &&
694  !f->pktq->abort_request) {
695  SDL_CondWait(f->cond, f->mutex);
696  }
697  SDL_UnlockMutex(f->mutex);
698 
699  if (f->pktq->abort_request)
700  return NULL;
701 
702  return &f->queue[f->windex];
703 }
704 
705 static Frame *frame_queue_peek_readable(FrameQueue *f)
706 {
707  /* wait until we have a readable new frame */
708  SDL_LockMutex(f->mutex);
709  while (f->size - f->rindex_shown <= 0 &&
710  !f->pktq->abort_request) {
711  SDL_CondWait(f->cond, f->mutex);
712  }
713  SDL_UnlockMutex(f->mutex);
714 
715  if (f->pktq->abort_request)
716  return NULL;
717 
718  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
719 }
720 
721 static void frame_queue_push(FrameQueue *f)
722 {
723  if (++f->windex == f->max_size)
724  f->windex = 0;
725  SDL_LockMutex(f->mutex);
726  f->size++;
727  SDL_CondSignal(f->cond);
728  SDL_UnlockMutex(f->mutex);
729 }
730 
731 static void frame_queue_next(FrameQueue *f)
732 {
733  if (f->keep_last && !f->rindex_shown) {
734  f->rindex_shown = 1;
735  return;
736  }
737  frame_queue_unref_item(&f->queue[f->rindex]);
738  if (++f->rindex == f->max_size)
739  f->rindex = 0;
740  SDL_LockMutex(f->mutex);
741  f->size--;
742  SDL_CondSignal(f->cond);
743  SDL_UnlockMutex(f->mutex);
744 }
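/*
 * Note on the ring-buffer indexing used by the frame queue (explanatory
 * sketch, not original code): windex is where the producer writes, rindex is
 * where the consumer reads, and size counts queued frames. When keep_last is
 * set, the most recently displayed frame stays in the queue and rindex_shown
 * marks it as already shown, so peek() reads rindex + rindex_shown and
 * nb_remaining() subtracts rindex_shown from size. That is what lets
 * frame_queue_prev() step back to the last displayed frame for a redisplay.
 */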
745 
746 /* jump back to the previous frame if available by resetting rindex_shown */
747 static int frame_queue_prev(FrameQueue *f)
748 {
749  int ret = f->rindex_shown;
750  f->rindex_shown = 0;
751  return ret;
752 }
753 
754 /* return the number of undisplayed frames in the queue */
755 static int frame_queue_nb_remaining(FrameQueue *f)
756 {
757  return f->size - f->rindex_shown;
758 }
759 
760 /* return last shown position */
761 static int64_t frame_queue_last_pos(FrameQueue *f)
762 {
763  Frame *fp = &f->queue[f->rindex];
764  if (f->rindex_shown && fp->serial == f->pktq->serial)
765  return fp->pos;
766  else
767  return -1;
768 }
769 
770 static void decoder_abort(Decoder *d, FrameQueue *fq)
771 {
772  packet_queue_abort(d->queue);
773  frame_queue_signal(fq);
774  SDL_WaitThread(d->decoder_tid, NULL);
775  d->decoder_tid = NULL;
776  packet_queue_flush(d->queue);
777 }
778 
779 static inline void fill_rectangle(SDL_Surface *screen,
780  int x, int y, int w, int h, int color, int update)
781 {
782  SDL_Rect rect;
783  rect.x = x;
784  rect.y = y;
785  rect.w = w;
786  rect.h = h;
787  SDL_FillRect(screen, &rect, color);
788  if (update && w > 0 && h > 0)
789  SDL_UpdateRect(screen, x, y, w, h);
790 }
791 
792 /* draw only the border of a rectangle */
793 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
794 {
795  int w1, w2, h1, h2;
796 
797  /* fill the background */
798  w1 = x;
799  if (w1 < 0)
800  w1 = 0;
801  w2 = width - (x + w);
802  if (w2 < 0)
803  w2 = 0;
804  h1 = y;
805  if (h1 < 0)
806  h1 = 0;
807  h2 = height - (y + h);
808  if (h2 < 0)
809  h2 = 0;
810  fill_rectangle(screen,
811  xleft, ytop,
812  w1, height,
813  color, update);
814  fill_rectangle(screen,
815  xleft + width - w2, ytop,
816  w2, height,
817  color, update);
818  fill_rectangle(screen,
819  xleft + w1, ytop,
820  width - w1 - w2, h1,
821  color, update);
822  fill_rectangle(screen,
823  xleft + w1, ytop + height - h2,
824  width - w1 - w2, h2,
825  color, update);
826 }
827 
828 #define ALPHA_BLEND(a, oldp, newp, s)\
829 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
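/*
 * Worked example of ALPHA_BLEND (illustration only): with s == 0 the macro is
 * a plain alpha blend, (oldp * (255 - a) + newp * a) / 255, so a == 0 keeps
 * oldp and a == 255 gives newp. A non-zero s is used when newp is the sum of
 * 2^s neighbouring samples (see the chroma averaging below); shifting oldp up
 * and dividing by 255 << s keeps the result in the original sample range.
 */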
830 
831 #define RGBA_IN(r, g, b, a, s)\
832 {\
833  unsigned int v = ((const uint32_t *)(s))[0];\
834  a = (v >> 24) & 0xff;\
835  r = (v >> 16) & 0xff;\
836  g = (v >> 8) & 0xff;\
837  b = v & 0xff;\
838 }
839 
840 #define YUVA_IN(y, u, v, a, s, pal)\
841 {\
842  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
843  a = (val >> 24) & 0xff;\
844  y = (val >> 16) & 0xff;\
845  u = (val >> 8) & 0xff;\
846  v = val & 0xff;\
847 }
848 
849 #define YUVA_OUT(d, y, u, v, a)\
850 {\
851  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
852 }
853 
854 
855 #define BPP 1
856 
857 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
858 {
859  int wrap, wrap3, width2, skip2;
860  int y, u, v, a, u1, v1, a1, w, h;
861  uint8_t *lum, *cb, *cr;
862  const uint8_t *p;
863  const uint32_t *pal;
864  int dstx, dsty, dstw, dsth;
865 
866  dstw = av_clip(rect->w, 0, imgw);
867  dsth = av_clip(rect->h, 0, imgh);
868  dstx = av_clip(rect->x, 0, imgw - dstw);
869  dsty = av_clip(rect->y, 0, imgh - dsth);
870  lum = dst->data[0] + dsty * dst->linesize[0];
871  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
872  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
873 
874  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
875  skip2 = dstx >> 1;
876  wrap = dst->linesize[0];
877  wrap3 = rect->pict.linesize[0];
878  p = rect->pict.data[0];
879  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
880 
881  if (dsty & 1) {
882  lum += dstx;
883  cb += skip2;
884  cr += skip2;
885 
886  if (dstx & 1) {
887  YUVA_IN(y, u, v, a, p, pal);
888  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
889  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
890  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
891  cb++;
892  cr++;
893  lum++;
894  p += BPP;
895  }
896  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
897  YUVA_IN(y, u, v, a, p, pal);
898  u1 = u;
899  v1 = v;
900  a1 = a;
901  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
902 
903  YUVA_IN(y, u, v, a, p + BPP, pal);
904  u1 += u;
905  v1 += v;
906  a1 += a;
907  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
908  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
909  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
910  cb++;
911  cr++;
912  p += 2 * BPP;
913  lum += 2;
914  }
915  if (w) {
916  YUVA_IN(y, u, v, a, p, pal);
917  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
918  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
919  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
920  p++;
921  lum++;
922  }
923  p += wrap3 - dstw * BPP;
924  lum += wrap - dstw - dstx;
925  cb += dst->linesize[1] - width2 - skip2;
926  cr += dst->linesize[2] - width2 - skip2;
927  }
928  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
929  lum += dstx;
930  cb += skip2;
931  cr += skip2;
932 
933  if (dstx & 1) {
934  YUVA_IN(y, u, v, a, p, pal);
935  u1 = u;
936  v1 = v;
937  a1 = a;
938  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
939  p += wrap3;
940  lum += wrap;
941  YUVA_IN(y, u, v, a, p, pal);
942  u1 += u;
943  v1 += v;
944  a1 += a;
945  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
946  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
947  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
948  cb++;
949  cr++;
950  p += -wrap3 + BPP;
951  lum += -wrap + 1;
952  }
953  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
954  YUVA_IN(y, u, v, a, p, pal);
955  u1 = u;
956  v1 = v;
957  a1 = a;
958  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
959 
960  YUVA_IN(y, u, v, a, p + BPP, pal);
961  u1 += u;
962  v1 += v;
963  a1 += a;
964  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
965  p += wrap3;
966  lum += wrap;
967 
968  YUVA_IN(y, u, v, a, p, pal);
969  u1 += u;
970  v1 += v;
971  a1 += a;
972  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
973 
974  YUVA_IN(y, u, v, a, p + BPP, pal);
975  u1 += u;
976  v1 += v;
977  a1 += a;
978  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
979 
980  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
981  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
982 
983  cb++;
984  cr++;
985  p += -wrap3 + 2 * BPP;
986  lum += -wrap + 2;
987  }
988  if (w) {
989  YUVA_IN(y, u, v, a, p, pal);
990  u1 = u;
991  v1 = v;
992  a1 = a;
993  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
994  p += wrap3;
995  lum += wrap;
996  YUVA_IN(y, u, v, a, p, pal);
997  u1 += u;
998  v1 += v;
999  a1 += a;
1000  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1001  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
1002  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
1003  cb++;
1004  cr++;
1005  p += -wrap3 + BPP;
1006  lum += -wrap + 1;
1007  }
1008  p += wrap3 + (wrap3 - dstw * BPP);
1009  lum += wrap + (wrap - dstw - dstx);
1010  cb += dst->linesize[1] - width2 - skip2;
1011  cr += dst->linesize[2] - width2 - skip2;
1012  }
1013  /* handle odd height */
1014  if (h) {
1015  lum += dstx;
1016  cb += skip2;
1017  cr += skip2;
1018 
1019  if (dstx & 1) {
1020  YUVA_IN(y, u, v, a, p, pal);
1021  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1022  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1023  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1024  cb++;
1025  cr++;
1026  lum++;
1027  p += BPP;
1028  }
1029  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
1030  YUVA_IN(y, u, v, a, p, pal);
1031  u1 = u;
1032  v1 = v;
1033  a1 = a;
1034  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1035 
1036  YUVA_IN(y, u, v, a, p + BPP, pal);
1037  u1 += u;
1038  v1 += v;
1039  a1 += a;
1040  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
1041  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
1042  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
1043  cb++;
1044  cr++;
1045  p += 2 * BPP;
1046  lum += 2;
1047  }
1048  if (w) {
1049  YUVA_IN(y, u, v, a, p, pal);
1050  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1051  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1052  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1053  }
1054  }
1055 }
1056 
1057 static void free_picture(Frame *vp)
1058 {
1059  if (vp->bmp) {
1060  SDL_FreeYUVOverlay(vp->bmp);
1061  vp->bmp = NULL;
1062  }
1063 }
1064 
1065 static void calculate_display_rect(SDL_Rect *rect,
1066  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
1067  int pic_width, int pic_height, AVRational pic_sar)
1068 {
1069  float aspect_ratio;
1070  int width, height, x, y;
1071 
1072  if (pic_sar.num == 0)
1073  aspect_ratio = 0;
1074  else
1075  aspect_ratio = av_q2d(pic_sar);
1076 
1077  if (aspect_ratio <= 0.0)
1078  aspect_ratio = 1.0;
1079  aspect_ratio *= (float)pic_width / (float)pic_height;
1080 
1081  /* XXX: we suppose the screen has a 1.0 pixel ratio */
1082  height = scr_height;
1083  width = ((int)rint(height * aspect_ratio)) & ~1;
1084  if (width > scr_width) {
1085  width = scr_width;
1086  height = ((int)rint(width / aspect_ratio)) & ~1;
1087  }
1088  x = (scr_width - width) / 2;
1089  y = (scr_height - height) / 2;
1090  rect->x = scr_xleft + x;
1091  rect->y = scr_ytop + y;
1092  rect->w = FFMAX(width, 1);
1093  rect->h = FFMAX(height, 1);
1094 }
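/*
 * Worked example (illustration only): a 1440x1080 picture with a 4:3 sample
 * aspect ratio has a display aspect of (4/3) * 1440/1080 = 16:9. On a
 * 1280x720 screen the first guess is width = rint(720 * 16/9) = 1280, which
 * fits, so the rectangle becomes 1280x720 centred in the screen; the & ~1
 * masks keep both dimensions even for the YUV overlay.
 */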
1095 
1096 static void video_image_display(VideoState *is)
1097 {
1098  Frame *vp;
1099  Frame *sp;
1100  AVPicture pict;
1101  SDL_Rect rect;
1102  int i;
1103 
1104  vp = frame_queue_peek(&is->pictq);
1105  if (vp->bmp) {
1106  if (is->subtitle_st) {
1107  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1108  sp = frame_queue_peek(&is->subpq);
1109 
1110  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1111  SDL_LockYUVOverlay (vp->bmp);
1112 
1113  pict.data[0] = vp->bmp->pixels[0];
1114  pict.data[1] = vp->bmp->pixels[2];
1115  pict.data[2] = vp->bmp->pixels[1];
1116 
1117  pict.linesize[0] = vp->bmp->pitches[0];
1118  pict.linesize[1] = vp->bmp->pitches[2];
1119  pict.linesize[2] = vp->bmp->pitches[1];
1120 
1121  for (i = 0; i < sp->sub.num_rects; i++)
1122  blend_subrect(&pict, sp->sub.rects[i],
1123  vp->bmp->w, vp->bmp->h);
1124 
1125  SDL_UnlockYUVOverlay (vp->bmp);
1126  }
1127  }
1128  }
1129 
1130  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1131 
1132  SDL_DisplayYUVOverlay(vp->bmp, &rect);
1133 
1134  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1135  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1136  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1137  is->last_display_rect = rect;
1138  }
1139  }
1140 }
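/*
 * Note on the pixels[2]/pixels[1] swap above (explanatory note): SDL's YV12
 * overlay stores its planes in Y, V, U order, while the AVPicture filled here
 * expects Y, U, V, so data[1] is wired to pixels[2] and data[2] to pixels[1]
 * (and likewise for the pitches). The same swap appears again in
 * queue_picture() below.
 */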
1141 
1142 static inline int compute_mod(int a, int b)
1143 {
1144  return a < 0 ? a%b + b : a%b;
1145 }
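/* Example (illustration only): compute_mod(-3, 10) == 7, whereas C's % would
 * give -3; this is what keeps negative sample-array offsets inside the array. */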
1146 
1147 static void video_audio_display(VideoState *s)
1148 {
1149  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1150  int ch, channels, h, h2, bgcolor, fgcolor;
1151  int64_t time_diff;
1152  int rdft_bits, nb_freq;
1153 
1154  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1155  ;
1156  nb_freq = 1 << (rdft_bits - 1);
1157 
1158  /* compute display index : center on currently output samples */
1159  channels = s->audio_tgt.channels;
1160  nb_display_channels = channels;
1161  if (!s->paused) {
1162  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1163  n = 2 * channels;
1164  delay = s->audio_write_buf_size;
1165  delay /= n;
1166 
1167  /* to be more precise, we take into account the time spent since
1168  the last buffer computation */
1169  if (audio_callback_time) {
1170  time_diff = av_gettime_relative() - audio_callback_time;
1171  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1172  }
1173 
1174  delay += 2 * data_used;
1175  if (delay < data_used)
1176  delay = data_used;
1177 
1178  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1179  if (s->show_mode == SHOW_MODE_WAVES) {
1180  h = INT_MIN;
1181  for (i = 0; i < 1000; i += channels) {
1182  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1183  int a = s->sample_array[idx];
1184  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1185  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1186  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1187  int score = a - d;
1188  if (h < score && (b ^ c) < 0) {
1189  h = score;
1190  i_start = idx;
1191  }
1192  }
1193  }
1194 
1195  s->last_i_start = i_start;
1196  } else {
1197  i_start = s->last_i_start;
1198  }
1199 
1200  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1201  if (s->show_mode == SHOW_MODE_WAVES) {
1202  fill_rectangle(screen,
1203  s->xleft, s->ytop, s->width, s->height,
1204  bgcolor, 0);
1205 
1206  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1207 
1208  /* total height for one channel */
1209  h = s->height / nb_display_channels;
1210  /* graph height / 2 */
1211  h2 = (h * 9) / 20;
1212  for (ch = 0; ch < nb_display_channels; ch++) {
1213  i = i_start + ch;
1214  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1215  for (x = 0; x < s->width; x++) {
1216  y = (s->sample_array[i] * h2) >> 15;
1217  if (y < 0) {
1218  y = -y;
1219  ys = y1 - y;
1220  } else {
1221  ys = y1;
1222  }
1223  fill_rectangle(screen,
1224  s->xleft + x, ys, 1, y,
1225  fgcolor, 0);
1226  i += channels;
1227  if (i >= SAMPLE_ARRAY_SIZE)
1228  i -= SAMPLE_ARRAY_SIZE;
1229  }
1230  }
1231 
1232  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1233 
1234  for (ch = 1; ch < nb_display_channels; ch++) {
1235  y = s->ytop + ch * h;
1236  fill_rectangle(screen,
1237  s->xleft, y, s->width, 1,
1238  fgcolor, 0);
1239  }
1240  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1241  } else {
1242  nb_display_channels= FFMIN(nb_display_channels, 2);
1243  if (rdft_bits != s->rdft_bits) {
1244  av_rdft_end(s->rdft);
1245  av_free(s->rdft_data);
1246  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1247  s->rdft_bits = rdft_bits;
1248  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1249  }
1250  if (!s->rdft || !s->rdft_data){
1251  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1252  s->show_mode = SHOW_MODE_WAVES;
1253  } else {
1254  FFTSample *data[2];
1255  for (ch = 0; ch < nb_display_channels; ch++) {
1256  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1257  i = i_start + ch;
1258  for (x = 0; x < 2 * nb_freq; x++) {
1259  double w = (x-nb_freq) * (1.0 / nb_freq);
1260  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1261  i += channels;
1262  if (i >= SAMPLE_ARRAY_SIZE)
1263  i -= SAMPLE_ARRAY_SIZE;
1264  }
1265  av_rdft_calc(s->rdft, data[ch]);
1266  }
1267  /* Least efficient way to do this, we should of course
1268  * directly access it but it is more than fast enough. */
1269  for (y = 0; y < s->height; y++) {
1270  double w = 1 / sqrt(nb_freq);
1271  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1272  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1273  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1274  a = FFMIN(a, 255);
1275  b = FFMIN(b, 255);
1276  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1277 
1278  fill_rectangle(screen,
1279  s->xpos, s->height-y, 1, 1,
1280  fgcolor, 0);
1281  }
1282  }
1283  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1284  if (!s->paused)
1285  s->xpos++;
1286  if (s->xpos >= s->width)
1287  s->xpos= s->xleft;
1288  }
1289 }
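/*
 * Note on the spectrum branch above (explanatory sketch): rdft_bits is chosen
 * so the transform size 1 << rdft_bits is at least twice the display height,
 * giving roughly one output bin per screen row. The (1.0 - w * w) factor
 * applied to the samples is a parabolic (Welch-style) window that tapers the
 * ends of the block before av_rdft_calc(), and the per-row colour encodes the
 * magnitudes of the two channels.
 */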
1290 
1291 static void stream_close(VideoState *is)
1292 {
1293  /* XXX: use a special url_shutdown call to abort parse cleanly */
1294  is->abort_request = 1;
1295  SDL_WaitThread(is->read_tid, NULL);
1299 
1300  /* free all pictures */
1301  frame_queue_destory(&is->pictq);
1302  frame_queue_destory(&is->sampq);
1303  frame_queue_destory(&is->subpq);
1304  SDL_DestroyCond(is->continue_read_thread);
1305 #if !CONFIG_AVFILTER
1307 #endif
1308  av_free(is);
1309 }
1310 
1311 static void do_exit(VideoState *is)
1312 {
1313  if (is) {
1314  stream_close(is);
1315  }
1317  uninit_opts();
1318 #if CONFIG_AVFILTER
1319  av_freep(&vfilters_list);
1320 #endif
1322  if (show_status)
1323  printf("\n");
1324  SDL_Quit();
1325  av_log(NULL, AV_LOG_QUIET, "%s", "");
1326  exit(0);
1327 }
1328 
1329 static void sigterm_handler(int sig)
1330 {
1331  exit(123);
1332 }
1333 
1334 static void set_default_window_size(int width, int height, AVRational sar)
1335 {
1336  SDL_Rect rect;
1337  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1338  default_width = rect.w;
1339  default_height = rect.h;
1340 }
1341 
1342 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1343 {
1344  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1345  int w,h;
1346 
1347  if (is_full_screen) flags |= SDL_FULLSCREEN;
1348  else flags |= SDL_RESIZABLE;
1349 
1350  if (vp && vp->width)
1351  set_default_window_size(vp->width, vp->height, vp->sar);
1352 
1353  if (is_full_screen && fs_screen_width) {
1354  w = fs_screen_width;
1355  h = fs_screen_height;
1356  } else if (!is_full_screen && screen_width) {
1357  w = screen_width;
1358  h = screen_height;
1359  } else {
1360  w = default_width;
1361  h = default_height;
1362  }
1363  w = FFMIN(16383, w);
1364  if (screen && is->width == screen->w && screen->w == w
1365  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1366  return 0;
1367  screen = SDL_SetVideoMode(w, h, 0, flags);
1368  if (!screen) {
1369  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1370  do_exit(is);
1371  }
1372  if (!window_title)
1373  window_title = input_filename;
1374  SDL_WM_SetCaption(window_title, window_title);
1375 
1376  is->width = screen->w;
1377  is->height = screen->h;
1378 
1379  return 0;
1380 }
1381 
1382 /* display the current picture, if any */
1383 static void video_display(VideoState *is)
1384 {
1385  if (!screen)
1386  video_open(is, 0, NULL);
1387  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1388  video_audio_display(is);
1389  else if (is->video_st)
1390  video_image_display(is);
1391 }
1392 
1393 static double get_clock(Clock *c)
1394 {
1395  if (*c->queue_serial != c->serial)
1396  return NAN;
1397  if (c->paused) {
1398  return c->pts;
1399  } else {
1400  double time = av_gettime_relative() / 1000000.0;
1401  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1402  }
1403 }
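/*
 * The clock algebra above, spelled out (explanatory note): pts_drift is set
 * to pts - last_updated, so pts_drift + time equals pts + (time - last_updated),
 * i.e. the stored pts advanced by the elapsed wall-clock time. Subtracting
 * (time - last_updated) * (1.0 - speed) rescales that elapsed time by speed,
 * so with speed == 1.0 the term vanishes and with speed == 1.01 the clock
 * runs 1% fast (as used by the external-clock speed adjustment).
 */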
1404 
1405 static void set_clock_at(Clock *c, double pts, int serial, double time)
1406 {
1407  c->pts = pts;
1408  c->last_updated = time;
1409  c->pts_drift = c->pts - time;
1410  c->serial = serial;
1411 }
1412 
1413 static void set_clock(Clock *c, double pts, int serial)
1414 {
1415  double time = av_gettime_relative() / 1000000.0;
1416  set_clock_at(c, pts, serial, time);
1417 }
1418 
1419 static void set_clock_speed(Clock *c, double speed)
1420 {
1421  set_clock(c, get_clock(c), c->serial);
1422  c->speed = speed;
1423 }
1424 
1425 static void init_clock(Clock *c, int *queue_serial)
1426 {
1427  c->speed = 1.0;
1428  c->paused = 0;
1429  c->queue_serial = queue_serial;
1430  set_clock(c, NAN, -1);
1431 }
1432 
1433 static void sync_clock_to_slave(Clock *c, Clock *slave)
1434 {
1435  double clock = get_clock(c);
1436  double slave_clock = get_clock(slave);
1437  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1438  set_clock(c, slave_clock, slave->serial);
1439 }
1440 
1441 static int get_master_sync_type(VideoState *is) {
1442  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1443  if (is->video_st)
1444  return AV_SYNC_VIDEO_MASTER;
1445  else
1446  return AV_SYNC_AUDIO_MASTER;
1447  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1448  if (is->audio_st)
1449  return AV_SYNC_AUDIO_MASTER;
1450  else
1451  return AV_SYNC_EXTERNAL_CLOCK;
1452  } else {
1453  return AV_SYNC_EXTERNAL_CLOCK;
1454  }
1455 }
1456 
1457 /* get the current master clock value */
1458 static double get_master_clock(VideoState *is)
1459 {
1460  double val;
1461 
1462  switch (get_master_sync_type(is)) {
1463  case AV_SYNC_VIDEO_MASTER:
1464  val = get_clock(&is->vidclk);
1465  break;
1466  case AV_SYNC_AUDIO_MASTER:
1467  val = get_clock(&is->audclk);
1468  break;
1469  default:
1470  val = get_clock(&is->extclk);
1471  break;
1472  }
1473  return val;
1474 }
1475 
1476 static void check_external_clock_speed(VideoState *is) {
1477  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1478  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1479  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1480  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1481  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1482  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1483  } else {
1484  double speed = is->extclk.speed;
1485  if (speed != 1.0)
1486  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1487  }
1488 }
1489 
1490 /* seek in the stream */
1491 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1492 {
1493  if (!is->seek_req) {
1494  is->seek_pos = pos;
1495  is->seek_rel = rel;
1496  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1497  if (seek_by_bytes)
1498  is->seek_flags |= AVSEEK_FLAG_BYTE;
1499  is->seek_req = 1;
1500  SDL_CondSignal(is->continue_read_thread);
1501  }
1502 }
1503 
1504 /* pause or resume the video */
1505 static void stream_toggle_pause(VideoState *is)
1506 {
1507  if (is->paused) {
1508  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1509  if (is->read_pause_return != AVERROR(ENOSYS)) {
1510  is->vidclk.paused = 0;
1511  }
1512  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1513  }
1514  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1515  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1516 }
1517 
1518 static void toggle_pause(VideoState *is)
1519 {
1520  stream_toggle_pause(is);
1521  is->step = 0;
1522 }
1523 
1524 static void step_to_next_frame(VideoState *is)
1525 {
1526  /* if the stream is paused unpause it, then step */
1527  if (is->paused)
1528  stream_toggle_pause(is);
1529  is->step = 1;
1530 }
1531 
1532 static double compute_target_delay(double delay, VideoState *is)
1533 {
1534  double sync_threshold, diff;
1535 
1536  /* update delay to follow master synchronisation source */
1537  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1538  /* if video is slave, we try to correct big delays by
1539  duplicating or deleting a frame */
1540  diff = get_clock(&is->vidclk) - get_master_clock(is);
1541 
1542  /* skip or repeat frame. We take into account the
1543  delay to compute the threshold. I still don't know
1544  if it is the best guess */
1545  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1546  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1547  if (diff <= -sync_threshold)
1548  delay = FFMAX(0, delay + diff);
1549  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1550  delay = delay + diff;
1551  else if (diff >= sync_threshold)
1552  delay = 2 * delay;
1553  }
1554  }
1555 
1556  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1557  delay, -diff);
1558 
1559  return delay;
1560 }
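/*
 * Worked example (illustration only): with a nominal frame delay of 40 ms the
 * sync threshold is clamped to that same 0.04 s. If the video clock is 0.1 s
 * behind the master (diff = -0.1), delay becomes FFMAX(0, 0.04 - 0.1) = 0, so
 * the next frame is shown immediately; if video is 0.1 s ahead, the 40 ms
 * delay is doubled to 80 ms so the master clock can catch up. Delays longer
 * than AV_SYNC_FRAMEDUP_THRESHOLD grow by the full diff instead of doubling.
 */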
1561 
1562 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1563  if (vp->serial == nextvp->serial) {
1564  double duration = nextvp->pts - vp->pts;
1565  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1566  return vp->duration;
1567  else
1568  return duration;
1569  } else {
1570  return 0.0;
1571  }
1572 }
1573 
1574 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1575  /* update current video pts */
1576  set_clock(&is->vidclk, pts, serial);
1577  sync_clock_to_slave(&is->extclk, &is->vidclk);
1578 }
1579 
1580 /* called to display each frame */
1581 static void video_refresh(void *opaque, double *remaining_time)
1582 {
1583  VideoState *is = opaque;
1584  double time;
1585 
1586  Frame *sp, *sp2;
1587 
1588  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1589  check_external_clock_speed(is);
1590 
1591  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1592  time = av_gettime_relative() / 1000000.0;
1593  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1594  video_display(is);
1595  is->last_vis_time = time;
1596  }
1597  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1598  }
1599 
1600  if (is->video_st) {
1601  int redisplay = 0;
1602  if (is->force_refresh)
1603  redisplay = frame_queue_prev(&is->pictq);
1604 retry:
1605  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1606  // nothing to do, no picture to display in the queue
1607  } else {
1608  double last_duration, duration, delay;
1609  Frame *vp, *lastvp;
1610 
1611  /* dequeue the picture */
1612  lastvp = frame_queue_peek_last(&is->pictq);
1613  vp = frame_queue_peek(&is->pictq);
1614 
1615  if (vp->serial != is->videoq.serial) {
1616  frame_queue_next(&is->pictq);
1617  redisplay = 0;
1618  goto retry;
1619  }
1620 
1621  if (lastvp->serial != vp->serial && !redisplay)
1622  is->frame_timer = av_gettime_relative() / 1000000.0;
1623 
1624  if (is->paused)
1625  goto display;
1626 
1627  /* compute nominal last_duration */
1628  last_duration = vp_duration(is, lastvp, vp);
1629  if (redisplay)
1630  delay = 0.0;
1631  else
1632  delay = compute_target_delay(last_duration, is);
1633 
1634  time= av_gettime_relative()/1000000.0;
1635  if (time < is->frame_timer + delay && !redisplay) {
1636  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1637  return;
1638  }
1639 
1640  is->frame_timer += delay;
1641  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1642  is->frame_timer = time;
1643 
1644  SDL_LockMutex(is->pictq.mutex);
1645  if (!redisplay && !isnan(vp->pts))
1646  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1647  SDL_UnlockMutex(is->pictq.mutex);
1648 
1649  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1650  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1651  duration = vp_duration(is, vp, nextvp);
1652  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1653  if (!redisplay)
1654  is->frame_drops_late++;
1655  frame_queue_next(&is->pictq);
1656  redisplay = 0;
1657  goto retry;
1658  }
1659  }
1660 
1661  if (is->subtitle_st) {
1662  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1663  sp = frame_queue_peek(&is->subpq);
1664 
1665  if (frame_queue_nb_remaining(&is->subpq) > 1)
1666  sp2 = frame_queue_peek_next(&is->subpq);
1667  else
1668  sp2 = NULL;
1669 
1670  if (sp->serial != is->subtitleq.serial
1671  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1672  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1673  {
1674  frame_queue_next(&is->subpq);
1675  } else {
1676  break;
1677  }
1678  }
1679  }
1680 
1681 display:
1682  /* display picture */
1683  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1684  video_display(is);
1685 
1686  frame_queue_next(&is->pictq);
1687 
1688  if (is->step && !is->paused)
1689  stream_toggle_pause(is);
1690  }
1691  }
1692  is->force_refresh = 0;
1693  if (show_status) {
1694  static int64_t last_time;
1695  int64_t cur_time;
1696  int aqsize, vqsize, sqsize;
1697  double av_diff;
1698 
1699  cur_time = av_gettime_relative();
1700  if (!last_time || (cur_time - last_time) >= 30000) {
1701  aqsize = 0;
1702  vqsize = 0;
1703  sqsize = 0;
1704  if (is->audio_st)
1705  aqsize = is->audioq.size;
1706  if (is->video_st)
1707  vqsize = is->videoq.size;
1708  if (is->subtitle_st)
1709  sqsize = is->subtitleq.size;
1710  av_diff = 0;
1711  if (is->audio_st && is->video_st)
1712  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1713  else if (is->video_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1715  else if (is->audio_st)
1716  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1718  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1719  get_master_clock(is),
1720  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1721  av_diff,
1723  aqsize / 1024,
1724  vqsize / 1024,
1725  sqsize,
1728  fflush(stdout);
1729  last_time = cur_time;
1730  }
1731  }
1732 }
1733 
1734 /* allocate a picture (needs to be done in the main thread to avoid
1735  potential locking problems) */
1736 static void alloc_picture(VideoState *is)
1737 {
1738  Frame *vp;
1739  int64_t bufferdiff;
1740 
1741  vp = &is->pictq.queue[is->pictq.windex];
1742 
1743  free_picture(vp);
1744 
1745  video_open(is, 0, vp);
1746 
1747  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1748  SDL_YV12_OVERLAY,
1749  screen);
1750  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1751  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1752  /* SDL allocates a buffer smaller than requested if the video
1753  * overlay hardware is unable to support the requested size. */
1755  "Error: the video system does not support an image\n"
1756  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1757  "to reduce the image size.\n", vp->width, vp->height );
1758  do_exit(is);
1759  }
1760 
1761  SDL_LockMutex(is->pictq.mutex);
1762  vp->allocated = 1;
1763  SDL_CondSignal(is->pictq.cond);
1764  SDL_UnlockMutex(is->pictq.mutex);
1765 }
1766 
1767 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1768  int i, width, height;
1769  Uint8 *p, *maxp;
1770  for (i = 0; i < 3; i++) {
1771  width = bmp->w;
1772  height = bmp->h;
1773  if (i > 0) {
1774  width >>= 1;
1775  height >>= 1;
1776  }
1777  if (bmp->pitches[i] > width) {
1778  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1779  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1780  *(p+1) = *p;
1781  }
1782  }
1783 }
1784 
1785 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1786 {
1787  Frame *vp;
1788 
1789 #if defined(DEBUG_SYNC) && 0
1790  printf("frame_type=%c pts=%0.3f\n",
1791  av_get_picture_type_char(src_frame->pict_type), pts);
1792 #endif
1793 
1794  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1795  return -1;
1796 
1797  vp->sar = src_frame->sample_aspect_ratio;
1798 
1799  /* alloc or resize hardware picture buffer */
1800  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1801  vp->width != src_frame->width ||
1802  vp->height != src_frame->height) {
1803  SDL_Event event;
1804 
1805  vp->allocated = 0;
1806  vp->reallocate = 0;
1807  vp->width = src_frame->width;
1808  vp->height = src_frame->height;
1809 
1810  /* the allocation must be done in the main thread to avoid
1811  locking problems. */
1812  event.type = FF_ALLOC_EVENT;
1813  event.user.data1 = is;
1814  SDL_PushEvent(&event);
1815 
1816  /* wait until the picture is allocated */
1817  SDL_LockMutex(is->pictq.mutex);
1818  while (!vp->allocated && !is->videoq.abort_request) {
1819  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1820  }
1821  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1822  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1823  while (!vp->allocated && !is->abort_request) {
1824  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1825  }
1826  }
1827  SDL_UnlockMutex(is->pictq.mutex);
1828 
1829  if (is->videoq.abort_request)
1830  return -1;
1831  }
1832 
1833  /* if the frame is not skipped, then display it */
1834  if (vp->bmp) {
1835  AVPicture pict = { { 0 } };
1836 
1837  /* get a pointer on the bitmap */
1838  SDL_LockYUVOverlay (vp->bmp);
1839 
1840  pict.data[0] = vp->bmp->pixels[0];
1841  pict.data[1] = vp->bmp->pixels[2];
1842  pict.data[2] = vp->bmp->pixels[1];
1843 
1844  pict.linesize[0] = vp->bmp->pitches[0];
1845  pict.linesize[1] = vp->bmp->pitches[2];
1846  pict.linesize[2] = vp->bmp->pitches[1];
1847 
1848 #if CONFIG_AVFILTER
1849  // FIXME use direct rendering
1850  av_picture_copy(&pict, (AVPicture *)src_frame,
1851  src_frame->format, vp->width, vp->height);
1852 #else
1853  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1854  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1855  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1856  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1857  if (!is->img_convert_ctx) {
1858  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1859  exit(1);
1860  }
1861  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1862  0, vp->height, pict.data, pict.linesize);
1863 #endif
1864  /* workaround SDL PITCH_WORKAROUND */
1865  duplicate_right_border_pixels(vp->bmp);
1866  /* update the bitmap content */
1867  SDL_UnlockYUVOverlay(vp->bmp);
1868 
1869  vp->pts = pts;
1870  vp->duration = duration;
1871  vp->pos = pos;
1872  vp->serial = serial;
1873 
1874  /* now we can update the picture count */
1875  frame_queue_push(&is->pictq);
1876  }
1877  return 0;
1878 }
1879 
1880 static int get_video_frame(VideoState *is, AVFrame *frame)
1881 {
1882  int got_picture;
1883 
1884  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1885  return -1;
1886 
1887  if (got_picture) {
1888  double dpts = NAN;
1889 
1890  if (frame->pts != AV_NOPTS_VALUE)
1891  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1892 
1893  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1894 
1895  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1896  if (frame->pts != AV_NOPTS_VALUE) {
1897  double diff = dpts - get_master_clock(is);
1898  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1899  diff - is->frame_last_filter_delay < 0 &&
1900  is->viddec.pkt_serial == is->vidclk.serial &&
1901  is->videoq.nb_packets) {
1902  is->frame_drops_early++;
1903  av_frame_unref(frame);
1904  got_picture = 0;
1905  }
1906  }
1907  }
1908  }
1909 
1910  return got_picture;
1911 }
1912 
1913 #if CONFIG_AVFILTER
1914 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1915  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1916 {
1917  int ret, i;
1918  int nb_filters = graph->nb_filters;
1919  AVFilterInOut *outputs = NULL, *inputs = NULL;
1920 
1921  if (filtergraph) {
1922  outputs = avfilter_inout_alloc();
1923  inputs = avfilter_inout_alloc();
1924  if (!outputs || !inputs) {
1925  ret = AVERROR(ENOMEM);
1926  goto fail;
1927  }
1928 
1929  outputs->name = av_strdup("in");
1930  outputs->filter_ctx = source_ctx;
1931  outputs->pad_idx = 0;
1932  outputs->next = NULL;
1933 
1934  inputs->name = av_strdup("out");
1935  inputs->filter_ctx = sink_ctx;
1936  inputs->pad_idx = 0;
1937  inputs->next = NULL;
1938 
1939  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1940  goto fail;
1941  } else {
1942  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1943  goto fail;
1944  }
1945 
1946  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1947  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1948  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1949 
1950  ret = avfilter_graph_config(graph, NULL);
1951 fail:
1952  avfilter_inout_free(&outputs);
1953  avfilter_inout_free(&inputs);
1954  return ret;
1955 }
1956 
1957 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1958 {
1959  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1960  char sws_flags_str[128];
1961  char buffersrc_args[256];
1962  int ret;
1963  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1964  AVCodecContext *codec = is->video_st->codec;
1965  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1966 
1967  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1968  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1969  graph->scale_sws_opts = av_strdup(sws_flags_str);
1970 
1971  snprintf(buffersrc_args, sizeof(buffersrc_args),
1972  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1973  frame->width, frame->height, frame->format,
1974  is->video_st->time_base.num, is->video_st->time_base.den,
1975  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1976  if (fr.num && fr.den)
1977  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1978 
1979  if ((ret = avfilter_graph_create_filter(&filt_src,
1980  avfilter_get_by_name("buffer"),
1981  "ffplay_buffer", buffersrc_args, NULL,
1982  graph)) < 0)
1983  goto fail;
1984 
1985  ret = avfilter_graph_create_filter(&filt_out,
1986  avfilter_get_by_name("buffersink"),
1987  "ffplay_buffersink", NULL, NULL, graph);
1988  if (ret < 0)
1989  goto fail;
1990 
1991  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto fail;
1993 
1994  last_filter = filt_out;
1995 
1996 /* Note: this macro adds a filter before the lastly added filter, so the
1997  * processing order of the filters is in reverse */
1998 #define INSERT_FILT(name, arg) do { \
1999  AVFilterContext *filt_ctx; \
2000  \
2001  ret = avfilter_graph_create_filter(&filt_ctx, \
2002  avfilter_get_by_name(name), \
2003  "ffplay_" name, arg, NULL, graph); \
2004  if (ret < 0) \
2005  goto fail; \
2006  \
2007  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2008  if (ret < 0) \
2009  goto fail; \
2010  \
2011  last_filter = filt_ctx; \
2012 } while (0)
2013 
2014  /* SDL YUV code does not handle odd width/height for some driver
2015  * combinations, therefore we crop the picture to an even width/height. */
2016  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
2017 
2018  if (autorotate) {
2019  AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
2020  if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
2021  if (!strcmp(rotate_tag->value, "90")) {
2022  INSERT_FILT("transpose", "clock");
2023  } else if (!strcmp(rotate_tag->value, "180")) {
2024  INSERT_FILT("hflip", NULL);
2025  INSERT_FILT("vflip", NULL);
2026  } else if (!strcmp(rotate_tag->value, "270")) {
2027  INSERT_FILT("transpose", "cclock");
2028  } else {
2029  char rotate_buf[64];
2030  snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
2031  INSERT_FILT("rotate", rotate_buf);
2032  }
2033  }
2034  }
2035 
2036  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2037  goto fail;
2038 
2039  is->in_video_filter = filt_src;
2040  is->out_video_filter = filt_out;
2041 
2042 fail:
2043  return ret;
2044 }
2045 
2046 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2047 {
2048  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
2049  int sample_rates[2] = { 0, -1 };
2050  int64_t channel_layouts[2] = { 0, -1 };
2051  int channels[2] = { 0, -1 };
2052  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2053  char aresample_swr_opts[512] = "";
2054  AVDictionaryEntry *e = NULL;
2055  char asrc_args[256];
2056  int ret;
2057 
2058  avfilter_graph_free(&is->agraph);
2059  if (!(is->agraph = avfilter_graph_alloc()))
2060  return AVERROR(ENOMEM);
2061 
2062  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2063  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2064  if (strlen(aresample_swr_opts))
2065  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2066  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2067 
2068  ret = snprintf(asrc_args, sizeof(asrc_args),
2069  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2070  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2071  is->audio_filter_src.channels,
2072  1, is->audio_filter_src.freq);
2073  if (is->audio_filter_src.channel_layout)
2074  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2075  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2076 
2077  ret = avfilter_graph_create_filter(&filt_asrc,
2078  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2079  asrc_args, NULL, is->agraph);
2080  if (ret < 0)
2081  goto end;
2082 
2083 
2084  ret = avfilter_graph_create_filter(&filt_asink,
2085  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2086  NULL, NULL, is->agraph);
2087  if (ret < 0)
2088  goto end;
2089 
2090  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2091  goto end;
2092  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2093  goto end;
2094 
2095  if (force_output_format) {
2096  channel_layouts[0] = is->audio_tgt.channel_layout;
2097  channels [0] = is->audio_tgt.channels;
2098  sample_rates [0] = is->audio_tgt.freq;
2099  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2100  goto end;
2101  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2102  goto end;
2103  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2104  goto end;
2105  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2106  goto end;
2107  }
2108 
2109 
2110  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2111  goto end;
2112 
2113  is->in_audio_filter = filt_asrc;
2114  is->out_audio_filter = filt_asink;
2115 
2116 end:
2117  if (ret < 0)
2118  avfilter_graph_free(&is->agraph);
2119  return ret;
2120 }
2121 #endif /* CONFIG_AVFILTER */
2122 
2123 static int audio_thread(void *arg)
2124 {
2125  VideoState *is = arg;
2126  AVFrame *frame = av_frame_alloc();
2127  Frame *af;
2128 #if CONFIG_AVFILTER
2129  int last_serial = -1;
2130  int64_t dec_channel_layout;
2131  int reconfigure;
2132 #endif
2133  int got_frame = 0;
2134  AVRational tb;
2135  int ret = 0;
2136 
2137  if (!frame)
2138  return AVERROR(ENOMEM);
2139 
2140  do {
2141  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2142  goto the_end;
2143 
2144  if (got_frame) {
2145  tb = (AVRational){1, frame->sample_rate};
2146 
2147 #if CONFIG_AVFILTER
2148  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2149 
2150  reconfigure =
2151  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2152  frame->format, av_frame_get_channels(frame)) ||
2153  is->audio_filter_src.channel_layout != dec_channel_layout ||
2154  is->audio_filter_src.freq != frame->sample_rate ||
2155  is->auddec.pkt_serial != last_serial;
2156 
2157  if (reconfigure) {
2158  char buf1[1024], buf2[1024];
2159  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2160  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2161  av_log(NULL, AV_LOG_DEBUG,
2162  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2163  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2164  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2165 
2166  is->audio_filter_src.fmt = frame->format;
2167  is->audio_filter_src.channels = av_frame_get_channels(frame);
2168  is->audio_filter_src.channel_layout = dec_channel_layout;
2169  is->audio_filter_src.freq = frame->sample_rate;
2170  last_serial = is->auddec.pkt_serial;
2171 
2172  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2173  goto the_end;
2174  }
2175 
2176  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2177  goto the_end;
2178 
2179  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2180  tb = is->out_audio_filter->inputs[0]->time_base;
2181 #endif
2182  if (!(af = frame_queue_peek_writable(&is->sampq)))
2183  goto the_end;
2184 
2185  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2186  af->pos = av_frame_get_pkt_pos(frame);
2187  af->serial = is->auddec.pkt_serial;
2188  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
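/* e.g. a frame of 1024 samples at 44100 Hz gives a duration of
 * 1024/44100 ~= 0.023 s, so roughly 23 ms of audio per queued frame. */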
2189 
2190  av_frame_move_ref(af->frame, frame);
2191  frame_queue_push(&is->sampq);
2192 
2193 #if CONFIG_AVFILTER
2194  if (is->audioq.serial != is->auddec.pkt_serial)
2195  break;
2196  }
2197  if (ret == AVERROR_EOF)
2198  is->auddec.finished = is->auddec.pkt_serial;
2199 #endif
2200  }
2201  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2202  the_end:
2203 #if CONFIG_AVFILTER
2204  avfilter_graph_free(&is->agraph);
2205 #endif
2206  av_frame_free(&frame);
2207  return ret;
2208 }
2209 
2210 static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2211 {
2212  packet_queue_start(d->queue);
2213  d->decoder_tid = SDL_CreateThread(fn, arg);
2214 }
2215 
2216 static int video_thread(void *arg)
2217 {
2218  VideoState *is = arg;
2219  AVFrame *frame = av_frame_alloc();
2220  double pts;
2221  double duration;
2222  int ret;
2223  AVRational tb = is->video_st->time_base;
2224  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2225 
2226 #if CONFIG_AVFILTER
2227  AVFilterGraph *graph = avfilter_graph_alloc();
2228  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2229  int last_w = 0;
2230  int last_h = 0;
2231  enum AVPixelFormat last_format = -2;
2232  int last_serial = -1;
2233  int last_vfilter_idx = 0;
2234 #endif
2235 
2236  if (!frame)
2237  return AVERROR(ENOMEM);
2238 
2239  for (;;) {
2240  ret = get_video_frame(is, frame);
2241  if (ret < 0)
2242  goto the_end;
2243  if (!ret)
2244  continue;
2245 
2246 #if CONFIG_AVFILTER
2247  if ( last_w != frame->width
2248  || last_h != frame->height
2249  || last_format != frame->format
2250  || last_serial != is->viddec.pkt_serial
2251  || last_vfilter_idx != is->vfilter_idx) {
2253  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2254  last_w, last_h,
2255  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2256  frame->width, frame->height,
2257  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2258  avfilter_graph_free(&graph);
2259  graph = avfilter_graph_alloc();
2260  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2261  SDL_Event event;
2262  event.type = FF_QUIT_EVENT;
2263  event.user.data1 = is;
2264  SDL_PushEvent(&event);
2265  goto the_end;
2266  }
2267  filt_in = is->in_video_filter;
2268  filt_out = is->out_video_filter;
2269  last_w = frame->width;
2270  last_h = frame->height;
2271  last_format = frame->format;
2272  last_serial = is->viddec.pkt_serial;
2273  last_vfilter_idx = is->vfilter_idx;
2274  frame_rate = filt_out->inputs[0]->frame_rate;
2275  }
2276 
2277  ret = av_buffersrc_add_frame(filt_in, frame);
2278  if (ret < 0)
2279  goto the_end;
2280 
2281  while (ret >= 0) {
2282  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2283 
2284  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2285  if (ret < 0) {
2286  if (ret == AVERROR_EOF)
2287  is->viddec.finished = is->viddec.pkt_serial;
2288  ret = 0;
2289  break;
2290  }
2291 
2292  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2293  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2294  is->frame_last_filter_delay = 0;
2295  tb = filt_out->inputs[0]->time_base;
2296 #endif
2297  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2298  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2299  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2300  av_frame_unref(frame);
2301 #if CONFIG_AVFILTER
2302  }
2303 #endif
2304 
2305  if (ret < 0)
2306  goto the_end;
2307  }
2308  the_end:
2309 #if CONFIG_AVFILTER
2310  avfilter_graph_free(&graph);
2311 #endif
2312  av_frame_free(&frame);
2313  return 0;
2314 }
2315 
2316 static int subtitle_thread(void *arg)
2317 {
2318  VideoState *is = arg;
2319  Frame *sp;
2320  int got_subtitle;
2321  double pts;
2322  int i, j;
2323  int r, g, b, y, u, v, a;
2324 
2325  for (;;) {
2326  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2327  return 0;
2328 
2329  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2330  break;
2331 
2332  pts = 0;
2333 
2334  if (got_subtitle && sp->sub.format == 0) {
2335  if (sp->sub.pts != AV_NOPTS_VALUE)
2336  pts = sp->sub.pts / (double)AV_TIME_BASE;
2337  sp->pts = pts;
2338  sp->serial = is->subdec.pkt_serial;
2339 
2340  for (i = 0; i < sp->sub.num_rects; i++)
2341  {
2342  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2343  {
2344  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2345  y = RGB_TO_Y_CCIR(r, g, b);
2346  u = RGB_TO_U_CCIR(r, g, b, 0);
2347  v = RGB_TO_V_CCIR(r, g, b, 0);
2348  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2349  }
2350  }
2351 
2352  /* now we can update the picture count */
2353  frame_queue_push(&is->subpq);
2354  } else if (got_subtitle) {
2355  avsubtitle_free(&sp->sub);
2356  }
2357  }
2358  return 0;
2359 }
2360 
2361 /* copy samples for viewing in editor window */
2362 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2363 {
2364  int size, len;
2365 
2366  size = samples_size / sizeof(short);
2367  while (size > 0) {
2368  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2369  if (len > size)
2370  len = size;
2371  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2372  samples += len;
2373  is->sample_array_index += len;
2374  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2375  is->sample_array_index = 0;
2376  size -= len;
2377  }
2378 }
2379 
2380 /* return the wanted number of samples to get better sync if sync_type is video
2381  * or external master clock */
2382 static int synchronize_audio(VideoState *is, int nb_samples)
2383 {
2384  int wanted_nb_samples = nb_samples;
2385 
2386  /* if not master, then we try to remove or add samples to correct the clock */
2387  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2388  double diff, avg_diff;
2389  int min_nb_samples, max_nb_samples;
2390 
2391  diff = get_clock(&is->audclk) - get_master_clock(is);
2392 
2393  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2394  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2395  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2396  /* not enough measurements yet for a reliable estimate */
2397  is->audio_diff_avg_count++;
2398  } else {
2399  /* estimate the A-V difference */
2400  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2401 
2402  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2403  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2404  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2405  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2406  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2407  }
2408  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2409  diff, avg_diff, wanted_nb_samples - nb_samples,
2410  is->audio_clock, is->audio_diff_threshold);
2411  }
2412  } else {
2413  /* the difference is too large: it is probably caused by initial PTS
2414  errors, so reset the A-V filter */
2415  is->audio_diff_avg_count = 0;
2416  is->audio_diff_cum = 0;
2417  }
2418  }
2419 
2420  return wanted_nb_samples;
2421 }
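/* Sketch of the averaging above (using the coefficient set up in
 * stream_component_open()): audio_diff_cum is an exponentially weighted sum,
 *     cum_n = diff_n + coef * cum_(n-1)
 * so avg_diff = cum_n * (1 - coef) approximates the mean of roughly the last
 * AUDIO_DIFF_AVG_NB differences, with older measurements decaying towards a
 * 1% weight. */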
2422 
2423 /**
2424  * Decode one audio frame and return its uncompressed size.
2425  *
2426  * The processed audio frame is decoded, converted if required, and
2427  * stored in is->audio_buf, with size in bytes given by the return
2428  * value.
2429  */
2430 static int audio_decode_frame(VideoState *is)
2431 {
2432  int data_size, resampled_data_size;
2433  int64_t dec_channel_layout;
2434  av_unused double audio_clock0;
2435  int wanted_nb_samples;
2436  Frame *af;
2437 
2438  if (is->paused)
2439  return -1;
2440 
2441  do {
2442  if (!(af = frame_queue_peek_readable(&is->sampq)))
2443  return -1;
2444  frame_queue_next(&is->sampq);
2445  } while (af->serial != is->audioq.serial);
2446 
2447  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2448  af->frame->nb_samples,
2449  af->frame->format, 1);
2450 
2451  dec_channel_layout =
2452  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2453  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2454  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2455 
2456  if (af->frame->format != is->audio_src.fmt ||
2457  dec_channel_layout != is->audio_src.channel_layout ||
2458  af->frame->sample_rate != is->audio_src.freq ||
2459  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2460  swr_free(&is->swr_ctx);
2461  is->swr_ctx = swr_alloc_set_opts(NULL,
2462  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2463  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2464  0, NULL);
2465  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2467  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2470  swr_free(&is->swr_ctx);
2471  return -1;
2472  }
2473  is->audio_src.channel_layout = dec_channel_layout;
2474  is->audio_src.channels = av_frame_get_channels(af->frame);
2475  is->audio_src.freq = af->frame->sample_rate;
2476  is->audio_src.fmt = af->frame->format;
2477  }
2478 
2479  if (is->swr_ctx) {
2480  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2481  uint8_t **out = &is->audio_buf1;
2482  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2483  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2484  int len2;
2485  if (out_size < 0) {
2486  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2487  return -1;
2488  }
2489  if (wanted_nb_samples != af->frame->nb_samples) {
2490  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2491  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2492  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2493  return -1;
2494  }
2495  }
2496  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2497  if (!is->audio_buf1)
2498  return AVERROR(ENOMEM);
2499  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2500  if (len2 < 0) {
2501  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2502  return -1;
2503  }
2504  if (len2 == out_count) {
2505  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2506  if (swr_init(is->swr_ctx) < 0)
2507  swr_free(&is->swr_ctx);
2508  }
2509  is->audio_buf = is->audio_buf1;
2510  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2511  } else {
2512  is->audio_buf = af->frame->data[0];
2513  resampled_data_size = data_size;
2514  }
2515 
2516  audio_clock0 = is->audio_clock;
2517  /* update the audio clock with the pts */
2518  if (!isnan(af->pts))
2519  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2520  else
2521  is->audio_clock = NAN;
2522  is->audio_clock_serial = af->serial;
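/* Note that is->audio_clock now points at the end of the decoded frame:
 * e.g. a frame with pts 10.000 s holding 1024 samples at 44100 Hz moves the
 * clock to roughly 10.023 s; sdl_audio_callback() later subtracts the data
 * still buffered to recover the actual playback position. */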
2523 #ifdef DEBUG
2524  {
2525  static double last_clock;
2526  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2527  is->audio_clock - last_clock,
2528  is->audio_clock, audio_clock0);
2529  last_clock = is->audio_clock;
2530  }
2531 #endif
2532  return resampled_data_size;
2533 }
2534 
2535 /* prepare a new audio buffer */
2536 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2537 {
2538  VideoState *is = opaque;
2539  int audio_size, len1;
2540 
2541  audio_callback_time = av_gettime_relative();
2542 
2543  while (len > 0) {
2544  if (is->audio_buf_index >= is->audio_buf_size) {
2545  audio_size = audio_decode_frame(is);
2546  if (audio_size < 0) {
2547  /* if error, just output silence */
2548  is->audio_buf = is->silence_buf;
2549  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2550  } else {
2551  if (is->show_mode != SHOW_MODE_VIDEO)
2552  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2553  is->audio_buf_size = audio_size;
2554  }
2555  is->audio_buf_index = 0;
2556  }
2557  len1 = is->audio_buf_size - is->audio_buf_index;
2558  if (len1 > len)
2559  len1 = len;
2560  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2561  len -= len1;
2562  stream += len1;
2563  is->audio_buf_index += len1;
2564  }
2565  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2566  /* Let's assume the audio driver that is used by SDL has two periods. */
2567  if (!isnan(is->audio_clock)) {
2568  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2569  sync_clock_to_slave(&is->extclk, &is->audclk);
2570  }
2571 }
2572 
2573 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2574 {
2575  SDL_AudioSpec wanted_spec, spec;
2576  const char *env;
2577  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2578  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2579  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
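/* Rough idea of the fallback tables above: when SDL rejects the requested
 * channel count, next_nb_channels[] supplies the next guess (e.g.
 * 6 -> 4 -> 2 -> 1), and once the channel options are exhausted the next
 * lower entry of next_sample_rates[] is tried. */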
2580 
2581  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2582  if (env) {
2583  wanted_nb_channels = atoi(env);
2584  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2585  }
2586  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2587  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2588  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2589  }
2590  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2591  wanted_spec.channels = wanted_nb_channels;
2592  wanted_spec.freq = wanted_sample_rate;
2593  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2594  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2595  return -1;
2596  }
2597  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2598  next_sample_rate_idx--;
2599  wanted_spec.format = AUDIO_S16SYS;
2600  wanted_spec.silence = 0;
2601  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
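/* e.g. at 44100 Hz: 44100 / SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 1470 and
 * 2 << av_log2(1470) = 2048, so SDL is asked for 2048-sample buffers,
 * giving about 21-22 callbacks per second, below the 30/s target. */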
2602  wanted_spec.callback = sdl_audio_callback;
2603  wanted_spec.userdata = opaque;
2604  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2605  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2606  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2607  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2608  if (!wanted_spec.channels) {
2609  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2610  wanted_spec.channels = wanted_nb_channels;
2611  if (!wanted_spec.freq) {
2613  "No more combinations to try, audio open failed\n");
2614  return -1;
2615  }
2616  }
2617  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2618  }
2619  if (spec.format != AUDIO_S16SYS) {
2621  "SDL advised audio format %d is not supported!\n", spec.format);
2622  return -1;
2623  }
2624  if (spec.channels != wanted_spec.channels) {
2625  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2626  if (!wanted_channel_layout) {
2628  "SDL advised channel count %d is not supported!\n", spec.channels);
2629  return -1;
2630  }
2631  }
2632 
2633  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2634  audio_hw_params->freq = spec.freq;
2635  audio_hw_params->channel_layout = wanted_channel_layout;
2636  audio_hw_params->channels = spec.channels;
2637  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2638  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
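/* For the S16 output negotiated above this is freq * channels * 2 bytes,
 * e.g. 44100 Hz stereo gives frame_size = 4 and bytes_per_sec = 176400. */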
2639  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2640  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2641  return -1;
2642  }
2643  return spec.size;
2644 }
2645 
2646 /* open a given stream. Return 0 if OK */
2647 static int stream_component_open(VideoState *is, int stream_index)
2648 {
2649  AVFormatContext *ic = is->ic;
2650  AVCodecContext *avctx;
2651  AVCodec *codec;
2652  const char *forced_codec_name = NULL;
2653  AVDictionary *opts;
2654  AVDictionaryEntry *t = NULL;
2655  int sample_rate, nb_channels;
2656  int64_t channel_layout;
2657  int ret = 0;
2658  int stream_lowres = lowres;
2659 
2660  if (stream_index < 0 || stream_index >= ic->nb_streams)
2661  return -1;
2662  avctx = ic->streams[stream_index]->codec;
2663 
2664  codec = avcodec_find_decoder(avctx->codec_id);
2665 
2666  switch(avctx->codec_type){
2667  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2668  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2669  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2670  }
2671  if (forced_codec_name)
2672  codec = avcodec_find_decoder_by_name(forced_codec_name);
2673  if (!codec) {
2674  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2675  "No codec could be found with name '%s'\n", forced_codec_name);
2676  else av_log(NULL, AV_LOG_WARNING,
2677  "No codec could be found with id %d\n", avctx->codec_id);
2678  return -1;
2679  }
2680 
2681  avctx->codec_id = codec->id;
2682  if(stream_lowres > av_codec_get_max_lowres(codec)){
2683  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2684  av_codec_get_max_lowres(codec));
2685  stream_lowres = av_codec_get_max_lowres(codec);
2686  }
2687  av_codec_set_lowres(avctx, stream_lowres);
2688 
2689  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2690  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2691  if(codec->capabilities & CODEC_CAP_DR1)
2692  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2693 
2694  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2695  if (!av_dict_get(opts, "threads", NULL, 0))
2696  av_dict_set(&opts, "threads", "auto", 0);
2697  if (stream_lowres)
2698  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2699  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2700  av_dict_set(&opts, "refcounted_frames", "1", 0);
2701  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2702  goto fail;
2703  }
2704  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2705  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2706  ret = AVERROR_OPTION_NOT_FOUND;
2707  goto fail;
2708  }
2709 
2710  is->eof = 0;
2711  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2712  switch (avctx->codec_type) {
2713  case AVMEDIA_TYPE_AUDIO:
2714 #if CONFIG_AVFILTER
2715  {
2716  AVFilterLink *link;
2717 
2718  is->audio_filter_src.freq = avctx->sample_rate;
2719  is->audio_filter_src.channels = avctx->channels;
2720  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2721  is->audio_filter_src.fmt = avctx->sample_fmt;
2722  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2723  goto fail;
2724  link = is->out_audio_filter->inputs[0];
2725  sample_rate = link->sample_rate;
2726  nb_channels = link->channels;
2727  channel_layout = link->channel_layout;
2728  }
2729 #else
2730  sample_rate = avctx->sample_rate;
2731  nb_channels = avctx->channels;
2732  channel_layout = avctx->channel_layout;
2733 #endif
2734 
2735  /* prepare audio output */
2736  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2737  goto fail;
2738  is->audio_hw_buf_size = ret;
2739  is->audio_src = is->audio_tgt;
2740  is->audio_buf_size = 0;
2741  is->audio_buf_index = 0;
2742 
2743  /* init averaging filter */
2744  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
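/* exp(log(0.01) / 20) ~= 0.794, so in the weighted average computed in
 * synchronize_audio() the newest A-V difference carries about a 20% weight
 * and a 20-frame-old difference has decayed to roughly 1%. */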
2745  is->audio_diff_avg_count = 0;
2746  /* since we do not have a precise enough measure of the audio FIFO fullness,
2747  we correct audio sync only if the error is larger than this threshold */
2748  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2749 
2750  is->audio_stream = stream_index;
2751  is->audio_st = ic->streams[stream_index];
2752 
2753  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2754  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2755  is->auddec.start_pts = is->audio_st->start_time;
2756  is->auddec.start_pts_tb = is->audio_st->time_base;
2757  }
2758  decoder_start(&is->auddec, audio_thread, is);
2759  SDL_PauseAudio(0);
2760  break;
2761  case AVMEDIA_TYPE_VIDEO:
2762  is->video_stream = stream_index;
2763  is->video_st = ic->streams[stream_index];
2764 
2765  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2766  decoder_start(&is->viddec, video_thread, is);
2767  is->queue_attachments_req = 1;
2768  break;
2769  case AVMEDIA_TYPE_SUBTITLE:
2770  is->subtitle_stream = stream_index;
2771  is->subtitle_st = ic->streams[stream_index];
2772 
2773  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2774  decoder_start(&is->subdec, subtitle_thread, is);
2775  break;
2776  default:
2777  break;
2778  }
2779 
2780 fail:
2781  av_dict_free(&opts);
2782 
2783  return ret;
2784 }
2785 
2786 static void stream_component_close(VideoState *is, int stream_index)
2787 {
2788  AVFormatContext *ic = is->ic;
2789  AVCodecContext *avctx;
2790 
2791  if (stream_index < 0 || stream_index >= ic->nb_streams)
2792  return;
2793  avctx = ic->streams[stream_index]->codec;
2794 
2795  switch (avctx->codec_type) {
2796  case AVMEDIA_TYPE_AUDIO:
2797  decoder_abort(&is->auddec, &is->sampq);
2798  SDL_CloseAudio();
2799  decoder_destroy(&is->auddec);
2800  swr_free(&is->swr_ctx);
2801  av_freep(&is->audio_buf1);
2802  is->audio_buf1_size = 0;
2803  is->audio_buf = NULL;
2804 
2805  if (is->rdft) {
2806  av_rdft_end(is->rdft);
2807  av_freep(&is->rdft_data);
2808  is->rdft = NULL;
2809  is->rdft_bits = 0;
2810  }
2811  break;
2812  case AVMEDIA_TYPE_VIDEO:
2813  decoder_abort(&is->viddec, &is->pictq);
2814  decoder_destroy(&is->viddec);
2815  break;
2816  case AVMEDIA_TYPE_SUBTITLE:
2817  decoder_abort(&is->subdec, &is->subpq);
2818  decoder_destroy(&is->subdec);
2819  break;
2820  default:
2821  break;
2822  }
2823 
2824  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2825  avcodec_close(avctx);
2826  switch (avctx->codec_type) {
2827  case AVMEDIA_TYPE_AUDIO:
2828  is->audio_st = NULL;
2829  is->audio_stream = -1;
2830  break;
2831  case AVMEDIA_TYPE_VIDEO:
2832  is->video_st = NULL;
2833  is->video_stream = -1;
2834  break;
2835  case AVMEDIA_TYPE_SUBTITLE:
2836  is->subtitle_st = NULL;
2837  is->subtitle_stream = -1;
2838  break;
2839  default:
2840  break;
2841  }
2842 }
2843 
2844 static int decode_interrupt_cb(void *ctx)
2845 {
2846  VideoState *is = ctx;
2847  return is->abort_request;
2848 }
2849 
2850 static int is_realtime(AVFormatContext *s)
2851 {
2852  if( !strcmp(s->iformat->name, "rtp")
2853  || !strcmp(s->iformat->name, "rtsp")
2854  || !strcmp(s->iformat->name, "sdp")
2855  )
2856  return 1;
2857 
2858  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2859  || !strncmp(s->filename, "udp:", 4)
2860  )
2861  )
2862  return 1;
2863  return 0;
2864 }
2865 
2866 /* this thread gets the stream from the disk or the network */
2867 static int read_thread(void *arg)
2868 {
2869  VideoState *is = arg;
2870  AVFormatContext *ic = NULL;
2871  int err, i, ret;
2872  int st_index[AVMEDIA_TYPE_NB];
2873  AVPacket pkt1, *pkt = &pkt1;
2874  int64_t stream_start_time;
2875  int pkt_in_play_range = 0;
2876  AVDictionaryEntry *t;
2877  AVDictionary **opts;
2878  int orig_nb_streams;
2879  SDL_mutex *wait_mutex = SDL_CreateMutex();
2880  int scan_all_pmts_set = 0;
2881  int64_t pkt_ts;
2882 
2883  memset(st_index, -1, sizeof(st_index));
2884  is->last_video_stream = is->video_stream = -1;
2885  is->last_audio_stream = is->audio_stream = -1;
2886  is->last_subtitle_stream = is->subtitle_stream = -1;
2887  is->eof = 0;
2888 
2889  ic = avformat_alloc_context();
2890  if (!ic) {
2891  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2892  ret = AVERROR(ENOMEM);
2893  goto fail;
2894  }
2895  ic->interrupt_callback.callback = decode_interrupt_cb;
2896  ic->interrupt_callback.opaque = is;
2897  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2898  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2899  scan_all_pmts_set = 1;
2900  }
2901  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2902  if (err < 0) {
2903  print_error(is->filename, err);
2904  ret = -1;
2905  goto fail;
2906  }
2907  if (scan_all_pmts_set)
2908  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2909 
2910  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2911  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2912  ret = AVERROR_OPTION_NOT_FOUND;
2913  goto fail;
2914  }
2915  is->ic = ic;
2916 
2917  if (genpts)
2918  ic->flags |= AVFMT_FLAG_GENPTS;
2919 
2920  av_format_inject_global_side_data(ic);
2921 
2922  opts = setup_find_stream_info_opts(ic, codec_opts);
2923  orig_nb_streams = ic->nb_streams;
2924 
2925  err = avformat_find_stream_info(ic, opts);
2926 
2927  for (i = 0; i < orig_nb_streams; i++)
2928  av_dict_free(&opts[i]);
2929  av_freep(&opts);
2930 
2931  if (err < 0) {
2933  "%s: could not find codec parameters\n", is->filename);
2934  ret = -1;
2935  goto fail;
2936  }
2937 
2938  if (ic->pb)
2939  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2940 
2941  if (seek_by_bytes < 0)
2942  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2943 
2944  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2945 
2946  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2947  window_title = av_asprintf("%s - %s", t->value, input_filename);
2948 
2949  /* if seeking requested, we execute it */
2950  if (start_time != AV_NOPTS_VALUE) {
2951  int64_t timestamp;
2952 
2953  timestamp = start_time;
2954  /* add the stream start time */
2955  if (ic->start_time != AV_NOPTS_VALUE)
2956  timestamp += ic->start_time;
2957  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2958  if (ret < 0) {
2959  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2960  is->filename, (double)timestamp / AV_TIME_BASE);
2961  }
2962  }
2963 
2964  is->realtime = is_realtime(ic);
2965 
2966  if (show_status)
2967  av_dump_format(ic, 0, is->filename, 0);
2968 
2969  for (i = 0; i < ic->nb_streams; i++) {
2970  AVStream *st = ic->streams[i];
2971  enum AVMediaType type = st->codec->codec_type;
2972  st->discard = AVDISCARD_ALL;
2973  if (wanted_stream_spec[type] && st_index[type] == -1)
2974  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2975  st_index[type] = i;
2976  }
2977  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2978  if (wanted_stream_spec[i] && st_index[i] == -1) {
2979  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2980  st_index[i] = INT_MAX;
2981  }
2982  }
2983 
2984  if (!video_disable)
2985  st_index[AVMEDIA_TYPE_VIDEO] =
2986  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2987  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2988  if (!audio_disable)
2989  st_index[AVMEDIA_TYPE_AUDIO] =
2990  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2991  st_index[AVMEDIA_TYPE_AUDIO],
2992  st_index[AVMEDIA_TYPE_VIDEO],
2993  NULL, 0);
2994  if (!video_disable && !subtitle_disable)
2995  st_index[AVMEDIA_TYPE_SUBTITLE] =
2996  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2997  st_index[AVMEDIA_TYPE_SUBTITLE],
2998  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2999  st_index[AVMEDIA_TYPE_AUDIO] :
3000  st_index[AVMEDIA_TYPE_VIDEO]),
3001  NULL, 0);
3002 
3003  is->show_mode = show_mode;
3004  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3005  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
3006  AVCodecContext *avctx = st->codec;
3007  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3008  if (avctx->width)
3009  set_default_window_size(avctx->width, avctx->height, sar);
3010  }
3011 
3012  /* open the streams */
3013  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3014  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3015  }
3016 
3017  ret = -1;
3018  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3019  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3020  }
3021  if (is->show_mode == SHOW_MODE_NONE)
3022  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3023 
3024  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3025  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3026  }
3027 
3028  if (is->video_stream < 0 && is->audio_stream < 0) {
3029  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3030  is->filename);
3031  ret = -1;
3032  goto fail;
3033  }
3034 
3035  if (infinite_buffer < 0 && is->realtime)
3036  infinite_buffer = 1;
3037 
3038  for (;;) {
3039  if (is->abort_request)
3040  break;
3041  if (is->paused != is->last_paused) {
3042  is->last_paused = is->paused;
3043  if (is->paused)
3044  is->read_pause_return = av_read_pause(ic);
3045  else
3046  av_read_play(ic);
3047  }
3048 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3049  if (is->paused &&
3050  (!strcmp(ic->iformat->name, "rtsp") ||
3051  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3052  /* wait 10 ms to avoid trying to get another packet */
3053  /* XXX: horrible */
3054  SDL_Delay(10);
3055  continue;
3056  }
3057 #endif
3058  if (is->seek_req) {
3059  int64_t seek_target = is->seek_pos;
3060  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3061  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3062 // FIXME the +-2 is due to rounding not being done in the correct direction in the
3063 // generation of the seek_pos/seek_rel variables
3064 
3065  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3066  if (ret < 0) {
3068  "%s: error while seeking\n", is->ic->filename);
3069  } else {
3070  if (is->audio_stream >= 0) {
3071  packet_queue_flush(&is->audioq);
3072  packet_queue_put(&is->audioq, &flush_pkt);
3073  }
3074  if (is->subtitle_stream >= 0) {
3075  packet_queue_flush(&is->subtitleq);
3076  packet_queue_put(&is->subtitleq, &flush_pkt);
3077  }
3078  if (is->video_stream >= 0) {
3079  packet_queue_flush(&is->videoq);
3080  packet_queue_put(&is->videoq, &flush_pkt);
3081  }
3082  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3083  set_clock(&is->extclk, NAN, 0);
3084  } else {
3085  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3086  }
3087  }
3088  is->seek_req = 0;
3089  is->queue_attachments_req = 1;
3090  is->eof = 0;
3091  if (is->paused)
3092  step_to_next_frame(is);
3093  }
3094  if (is->queue_attachments_req) {
3095  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3096  AVPacket copy;
3097  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3098  goto fail;
3099  packet_queue_put(&is->videoq, &copy);
3100  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3101  }
3102  is->queue_attachments_req = 0;
3103  }
3104 
3105  /* if the queues are full, no need to read more */
3106  if (infinite_buffer<1 &&
3107  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3108  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3109  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3110  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3111  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3112  /* wait 10 ms */
3113  SDL_LockMutex(wait_mutex);
3114  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3115  SDL_UnlockMutex(wait_mutex);
3116  continue;
3117  }
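/* In effect this caps demuxer read-ahead at MAX_QUEUE_SIZE (15 MB) of packet
 * data, or at more than MIN_FRAMES packets sitting in every open queue,
 * whichever is reached first; -infbuf lifts the cap, which is also the
 * default for realtime sources (see infinite_buffer above). */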
3118  if (!is->paused &&
3119  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3120  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3121  if (loop != 1 && (!loop || --loop)) {
3122  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3123  } else if (autoexit) {
3124  ret = AVERROR_EOF;
3125  goto fail;
3126  }
3127  }
3128  ret = av_read_frame(ic, pkt);
3129  if (ret < 0) {
3130  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3131  if (is->video_stream >= 0)
3132  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3133  if (is->audio_stream >= 0)
3134  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3135  if (is->subtitle_stream >= 0)
3136  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3137  is->eof = 1;
3138  }
3139  if (ic->pb && ic->pb->error)
3140  break;
3141  SDL_LockMutex(wait_mutex);
3142  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3143  SDL_UnlockMutex(wait_mutex);
3144  continue;
3145  } else {
3146  is->eof = 0;
3147  }
3148  /* check if packet is in play range specified by user, then queue, otherwise discard */
3149  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3150  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3151  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3152  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3153  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3154  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3155  <= ((double)duration / 1000000);
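/* Worked example (hypothetical options): with -ss 60 -t 10, start_time is
 * 60000000 and duration 10000000 microseconds, so a packet stays in the play
 * range only while its timestamp, rescaled to seconds and measured from the
 * stream start, has not passed 60 s + 10 s; later packets are discarded
 * below. The lower bound is enforced by the initial seek, not here. */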
3156  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3157  packet_queue_put(&is->audioq, pkt);
3158  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3159  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3160  packet_queue_put(&is->videoq, pkt);
3161  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3162  packet_queue_put(&is->subtitleq, pkt);
3163  } else {
3164  av_free_packet(pkt);
3165  }
3166  }
3167  /* wait until the end */
3168  while (!is->abort_request) {
3169  SDL_Delay(100);
3170  }
3171 
3172  ret = 0;
3173  fail:
3174  /* close each stream */
3175  if (is->audio_stream >= 0)
3176  stream_component_close(is, is->audio_stream);
3177  if (is->video_stream >= 0)
3178  stream_component_close(is, is->video_stream);
3179  if (is->subtitle_stream >= 0)
3180  stream_component_close(is, is->subtitle_stream);
3181  if (ic) {
3182  avformat_close_input(&ic);
3183  is->ic = NULL;
3184  }
3185 
3186  if (ret != 0) {
3187  SDL_Event event;
3188 
3189  event.type = FF_QUIT_EVENT;
3190  event.user.data1 = is;
3191  SDL_PushEvent(&event);
3192  }
3193  SDL_DestroyMutex(wait_mutex);
3194  return 0;
3195 }
3196 
3197 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3198 {
3199  VideoState *is;
3200 
3201  is = av_mallocz(sizeof(VideoState));
3202  if (!is)
3203  return NULL;
3204  av_strlcpy(is->filename, filename, sizeof(is->filename));
3205  is->iformat = iformat;
3206  is->ytop = 0;
3207  is->xleft = 0;
3208 
3209  /* start video display */
3210  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3211  goto fail;
3212  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3213  goto fail;
3214  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3215  goto fail;
3216 
3217  packet_queue_init(&is->videoq);
3218  packet_queue_init(&is->audioq);
3219  packet_queue_init(&is->subtitleq);
3220 
3221  is->continue_read_thread = SDL_CreateCond();
3222 
3223  init_clock(&is->vidclk, &is->videoq.serial);
3224  init_clock(&is->audclk, &is->audioq.serial);
3225  init_clock(&is->extclk, &is->extclk.serial);
3226  is->audio_clock_serial = -1;
3227  is->av_sync_type = av_sync_type;
3228  is->read_tid = SDL_CreateThread(read_thread, is);
3229  if (!is->read_tid) {
3230 fail:
3231  stream_close(is);
3232  return NULL;
3233  }
3234  return is;
3235 }
3236 
3237 static void stream_cycle_channel(VideoState *is, int codec_type)
3238 {
3239  AVFormatContext *ic = is->ic;
3240  int start_index, stream_index;
3241  int old_index;
3242  AVStream *st;
3243  AVProgram *p = NULL;
3244  int nb_streams = is->ic->nb_streams;
3245 
3246  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3247  start_index = is->last_video_stream;
3248  old_index = is->video_stream;
3249  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3250  start_index = is->last_audio_stream;
3251  old_index = is->audio_stream;
3252  } else {
3253  start_index = is->last_subtitle_stream;
3254  old_index = is->subtitle_stream;
3255  }
3256  stream_index = start_index;
3257 
3258  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3259  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3260  if (p) {
3261  nb_streams = p->nb_stream_indexes;
3262  for (start_index = 0; start_index < nb_streams; start_index++)
3263  if (p->stream_index[start_index] == stream_index)
3264  break;
3265  if (start_index == nb_streams)
3266  start_index = -1;
3267  stream_index = start_index;
3268  }
3269  }
3270 
3271  for (;;) {
3272  if (++stream_index >= nb_streams)
3273  {
3274  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3275  {
3276  stream_index = -1;
3277  is->last_subtitle_stream = -1;
3278  goto the_end;
3279  }
3280  if (start_index == -1)
3281  return;
3282  stream_index = 0;
3283  }
3284  if (stream_index == start_index)
3285  return;
3286  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3287  if (st->codec->codec_type == codec_type) {
3288  /* check that parameters are OK */
3289  switch (codec_type) {
3290  case AVMEDIA_TYPE_AUDIO:
3291  if (st->codec->sample_rate != 0 &&
3292  st->codec->channels != 0)
3293  goto the_end;
3294  break;
3295  case AVMEDIA_TYPE_VIDEO:
3296  case AVMEDIA_TYPE_SUBTITLE:
3297  goto the_end;
3298  default:
3299  break;
3300  }
3301  }
3302  }
3303  the_end:
3304  if (p && stream_index != -1)
3305  stream_index = p->stream_index[stream_index];
3306  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3307  av_get_media_type_string(codec_type),
3308  old_index,
3309  stream_index);
3310 
3311  stream_component_close(is, old_index);
3312  stream_component_open(is, stream_index);
3313 }
3314 
3315 
3316 static void toggle_full_screen(VideoState *is)
3317 {
3318 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3319  /* OS X needs to reallocate the SDL overlays */
3320  int i;
3321  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3322  is->pictq.queue[i].reallocate = 1;
3323 #endif
3324  is_full_screen = !is_full_screen;
3325  video_open(is, 1, NULL);
3326 }
3327 
3328 static void toggle_audio_display(VideoState *is)
3329 {
3330  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3331  int next = is->show_mode;
3332  do {
3333  next = (next + 1) % SHOW_MODE_NB;
3334  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3335  if (is->show_mode != next) {
3336  fill_rectangle(screen,
3337  is->xleft, is->ytop, is->width, is->height,
3338  bgcolor, 1);
3339  is->force_refresh = 1;
3340  is->show_mode = next;
3341  }
3342 }
3343 
3344 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3345  double remaining_time = 0.0;
3346  SDL_PumpEvents();
3347  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3348  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3349  SDL_ShowCursor(0);
3350  cursor_hidden = 1;
3351  }
3352  if (remaining_time > 0.0)
3353  av_usleep((int64_t)(remaining_time * 1000000.0));
3354  remaining_time = REFRESH_RATE;
3355  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3356  video_refresh(is, &remaining_time);
3357  SDL_PumpEvents();
3358  }
3359 }
3360 
3361 static void seek_chapter(VideoState *is, int incr)
3362 {
3363  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3364  int i;
3365 
3366  if (!is->ic->nb_chapters)
3367  return;
3368 
3369  /* find the current chapter */
3370  for (i = 0; i < is->ic->nb_chapters; i++) {
3371  AVChapter *ch = is->ic->chapters[i];
3372  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3373  i--;
3374  break;
3375  }
3376  }
3377 
3378  i += incr;
3379  i = FFMAX(i, 0);
3380  if (i >= is->ic->nb_chapters)
3381  return;
3382 
3383  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3384  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3385  AV_TIME_BASE_Q), 0, 0);
3386 }
3387 
3388 /* handle an event sent by the GUI */
3389 static void event_loop(VideoState *cur_stream)
3390 {
3391  SDL_Event event;
3392  double incr, pos, frac;
3393 
3394  for (;;) {
3395  double x;
3396  refresh_loop_wait_event(cur_stream, &event);
3397  switch (event.type) {
3398  case SDL_KEYDOWN:
3399  if (exit_on_keydown) {
3400  do_exit(cur_stream);
3401  break;
3402  }
3403  switch (event.key.keysym.sym) {
3404  case SDLK_ESCAPE:
3405  case SDLK_q:
3406  do_exit(cur_stream);
3407  break;
3408  case SDLK_f:
3409  toggle_full_screen(cur_stream);
3410  cur_stream->force_refresh = 1;
3411  break;
3412  case SDLK_p:
3413  case SDLK_SPACE:
3414  toggle_pause(cur_stream);
3415  break;
3416  case SDLK_s: // S: Step to next frame
3417  step_to_next_frame(cur_stream);
3418  break;
3419  case SDLK_a:
3420  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3421  break;
3422  case SDLK_v:
3423  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3424  break;
3425  case SDLK_c:
3426  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3427  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3428  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3429  break;
3430  case SDLK_t:
3431  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3432  break;
3433  case SDLK_w:
3434 #if CONFIG_AVFILTER
3435  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3436  if (++cur_stream->vfilter_idx >= nb_vfilters)
3437  cur_stream->vfilter_idx = 0;
3438  } else {
3439  cur_stream->vfilter_idx = 0;
3440  toggle_audio_display(cur_stream);
3441  }
3442 #else
3443  toggle_audio_display(cur_stream);
3444 #endif
3445  break;
3446  case SDLK_PAGEUP:
3447  if (cur_stream->ic->nb_chapters <= 1) {
3448  incr = 600.0;
3449  goto do_seek;
3450  }
3451  seek_chapter(cur_stream, 1);
3452  break;
3453  case SDLK_PAGEDOWN:
3454  if (cur_stream->ic->nb_chapters <= 1) {
3455  incr = -600.0;
3456  goto do_seek;
3457  }
3458  seek_chapter(cur_stream, -1);
3459  break;
3460  case SDLK_LEFT:
3461  incr = -10.0;
3462  goto do_seek;
3463  case SDLK_RIGHT:
3464  incr = 10.0;
3465  goto do_seek;
3466  case SDLK_UP:
3467  incr = 60.0;
3468  goto do_seek;
3469  case SDLK_DOWN:
3470  incr = -60.0;
3471  do_seek:
3472  if (seek_by_bytes) {
3473  pos = -1;
3474  if (pos < 0 && cur_stream->video_stream >= 0)
3475  pos = frame_queue_last_pos(&cur_stream->pictq);
3476  if (pos < 0 && cur_stream->audio_stream >= 0)
3477  pos = frame_queue_last_pos(&cur_stream->sampq);
3478  if (pos < 0)
3479  pos = avio_tell(cur_stream->ic->pb);
3480  if (cur_stream->ic->bit_rate)
3481  incr *= cur_stream->ic->bit_rate / 8.0;
3482  else
3483  incr *= 180000.0;
3484  pos += incr;
3485  stream_seek(cur_stream, pos, incr, 1);
3486  } else {
3487  pos = get_master_clock(cur_stream);
3488  if (isnan(pos))
3489  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3490  pos += incr;
3491  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3492  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3493  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3494  }
3495  break;
3496  default:
3497  break;
3498  }
3499  break;
3500  case SDL_VIDEOEXPOSE:
3501  cur_stream->force_refresh = 1;
3502  break;
3503  case SDL_MOUSEBUTTONDOWN:
3504  if (exit_on_mousedown) {
3505  do_exit(cur_stream);
3506  break;
3507  }
3508  case SDL_MOUSEMOTION:
3509  if (cursor_hidden) {
3510  SDL_ShowCursor(1);
3511  cursor_hidden = 0;
3512  }
3513  cursor_last_shown = av_gettime_relative();
3514  if (event.type == SDL_MOUSEBUTTONDOWN) {
3515  x = event.button.x;
3516  } else {
3517  if (event.motion.state != SDL_PRESSED)
3518  break;
3519  x = event.motion.x;
3520  }
3521  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3522  uint64_t size = avio_size(cur_stream->ic->pb);
3523  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3524  } else {
3525  int64_t ts;
3526  int ns, hh, mm, ss;
3527  int tns, thh, tmm, tss;
3528  tns = cur_stream->ic->duration / 1000000LL;
3529  thh = tns / 3600;
3530  tmm = (tns % 3600) / 60;
3531  tss = (tns % 60);
3532  frac = x / cur_stream->width;
3533  ns = frac * tns;
3534  hh = ns / 3600;
3535  mm = (ns % 3600) / 60;
3536  ss = (ns % 60);
3538  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3539  hh, mm, ss, thh, tmm, tss);
3540  ts = frac * cur_stream->ic->duration;
3541  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3542  ts += cur_stream->ic->start_time;
3543  stream_seek(cur_stream, ts, 0, 0);
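/* e.g. clicking at 25% of the window width in a two hour file yields
 * frac = 0.25 and ts = 0.25 * 7200 s = 1800 s (plus the container start
 * time when one is defined). */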
3544  }
3545  break;
3546  case SDL_VIDEORESIZE:
3547  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3548  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3549  if (!screen) {
3550  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3551  do_exit(cur_stream);
3552  }
3553  screen_width = cur_stream->width = screen->w;
3554  screen_height = cur_stream->height = screen->h;
3555  cur_stream->force_refresh = 1;
3556  break;
3557  case SDL_QUIT:
3558  case FF_QUIT_EVENT:
3559  do_exit(cur_stream);
3560  break;
3561  case FF_ALLOC_EVENT:
3562  alloc_picture(event.user.data1);
3563  break;
3564  default:
3565  break;
3566  }
3567  }
3568 }
3569 
3570 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3571 {
3572  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3573  return opt_default(NULL, "video_size", arg);
3574 }
3575 
3576 static int opt_width(void *optctx, const char *opt, const char *arg)
3577 {
3578  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3579  return 0;
3580 }
3581 
3582 static int opt_height(void *optctx, const char *opt, const char *arg)
3583 {
3584  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3585  return 0;
3586 }
3587 
3588 static int opt_format(void *optctx, const char *opt, const char *arg)
3589 {
3590  file_iformat = av_find_input_format(arg);
3591  if (!file_iformat) {
3592  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3593  return AVERROR(EINVAL);
3594  }
3595  return 0;
3596 }
3597 
3598 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3599 {
3600  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3601  return opt_default(NULL, "pixel_format", arg);
3602 }
3603 
3604 static int opt_sync(void *optctx, const char *opt, const char *arg)
3605 {
3606  if (!strcmp(arg, "audio"))
3608  else if (!strcmp(arg, "video"))
3610  else if (!strcmp(arg, "ext"))
3612  else {
3613  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3614  exit(1);
3615  }
3616  return 0;
3617 }
3618 
3619 static int opt_seek(void *optctx, const char *opt, const char *arg)
3620 {
3621  start_time = parse_time_or_die(opt, arg, 1);
3622  return 0;
3623 }
3624 
3625 static int opt_duration(void *optctx, const char *opt, const char *arg)
3626 {
3627  duration = parse_time_or_die(opt, arg, 1);
3628  return 0;
3629 }
3630 
3631 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3632 {
3633  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3634  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3635  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3636  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3637  return 0;
3638 }
3639 
3640 static void opt_input_file(void *optctx, const char *filename)
3641 {
3642  if (input_filename) {
3644  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3645  filename, input_filename);
3646  exit(1);
3647  }
3648  if (!strcmp(filename, "-"))
3649  filename = "pipe:";
3650  input_filename = filename;
3651 }
3652 
3653 static int opt_codec(void *optctx, const char *opt, const char *arg)
3654 {
3655  const char *spec = strchr(opt, ':');
3656  if (!spec) {
3658  "No media specifier was specified in '%s' in option '%s'\n",
3659  arg, opt);
3660  return AVERROR(EINVAL);
3661  }
3662  spec++;
3663  switch (spec[0]) {
3664  case 'a' : audio_codec_name = arg; break;
3665  case 's' : subtitle_codec_name = arg; break;
3666  case 'v' : video_codec_name = arg; break;
3667  default:
3669  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3670  return AVERROR(EINVAL);
3671  }
3672  return 0;
3673 }
3674 
3675 static int dummy;
3676 
3677 static const OptionDef options[] = {
3678 #include "cmdutils_common_opts.h"
3679  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3680  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3681  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3682  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3683  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3684  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3685  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3686  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3687  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3688  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3689  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3690  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3691  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3692  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3693  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3694  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3695  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3696  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3697  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3698  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3699  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3700  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3701  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3702  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3703  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3704  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3705  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3706  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3707  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3708 #if CONFIG_AVFILTER
3709  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3710  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3711 #endif
3712  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3713  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3714  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3715  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3716  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3717  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3718  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3719  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3720  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3721  { NULL, },
3722 };
3723 
3724 static void show_usage(void)
3725 {
3726  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3727  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3728  av_log(NULL, AV_LOG_INFO, "\n");
3729 }
3730 
3731 void show_help_default(const char *opt, const char *arg)
3732 {
3733  av_log_set_callback(log_callback_help);
3734  show_usage();
3735  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3736  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3737  printf("\n");
3740 #if !CONFIG_AVFILTER
3742 #else
3744 #endif
3745  printf("\nWhile playing:\n"
3746  "q, ESC quit\n"
3747  "f toggle full screen\n"
3748  "p, SPC pause\n"
3749  "a cycle audio channel in the current program\n"
3750  "v cycle video channel\n"
3751  "t cycle subtitle channel in the current program\n"
3752  "c cycle program\n"
3753  "w cycle video filters or show modes\n"
3754  "s activate frame-step mode\n"
3755  "left/right seek backward/forward 10 seconds\n"
3756  "down/up seek backward/forward 1 minute\n"
3757  "page down/page up seek backward/forward 10 minutes\n"
3758  "mouse click seek to percentage in file corresponding to fraction of width\n"
3759  );
3760 }
3761 
3762 static int lockmgr(void **mtx, enum AVLockOp op)
3763 {
3764  switch(op) {
3765  case AV_LOCK_CREATE:
3766  *mtx = SDL_CreateMutex();
3767  if(!*mtx)
3768  return 1;
3769  return 0;
3770  case AV_LOCK_OBTAIN:
3771  return !!SDL_LockMutex(*mtx);
3772  case AV_LOCK_RELEASE:
3773  return !!SDL_UnlockMutex(*mtx);
3774  case AV_LOCK_DESTROY:
3775  SDL_DestroyMutex(*mtx);
3776  return 0;
3777  }
3778  return 1;
3779 }
3780 
3781 /* Called from the main */
3782 int main(int argc, char **argv)
3783 {
3784  int flags;
3785  VideoState *is;
3786  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3787 
3788  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3789  parse_loglevel(argc, argv, options);
3790 
3791  /* register all codecs, demuxers and protocols */
3792 #if CONFIG_AVDEVICE
3793  avdevice_register_all();
3794 #endif
3795 #if CONFIG_AVFILTER
3796  avfilter_register_all();
3797 #endif
3798  av_register_all();
3799  avformat_network_init();
3800 
3801  init_opts();
3802 
3803  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3804  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3805 
3806  show_banner(argc, argv, options);
3807 
3808  parse_options(NULL, argc, argv, options, opt_input_file);
3809 
3810  if (!input_filename) {
3811  show_usage();
3812  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3814  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3815  exit(1);
3816  }
3817 
3818  if (display_disable) {
3819  video_disable = 1;
3820  }
3821  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3822  if (audio_disable)
3823  flags &= ~SDL_INIT_AUDIO;
3824  if (display_disable)
3825  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3826 #if !defined(_WIN32) && !defined(__APPLE__)
3827  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3828 #endif
3829  if (SDL_Init (flags)) {
3830  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3831  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3832  exit(1);
3833  }
3834 
3835  if (!display_disable) {
3836  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3837  fs_screen_width = vi->current_w;
3838  fs_screen_height = vi->current_h;
3839  }
3840 
3841  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3842  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3843  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3844 
3846  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3847  do_exit(NULL);
3848  }
3849 
3850  av_init_packet(&flush_pkt);
3851  flush_pkt.data = (uint8_t *)&flush_pkt;
3852 
3853  is = stream_open(input_filename, file_iformat);
3854  if (!is) {
3855  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3856  do_exit(NULL);
3857  }
3858 
3859  event_loop(is);
3860 
3861  /* never returns */
3862 
3863  return 0;
3864 }