FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73  /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
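/* With the two constants above, audio_open() further down in this file asks SDL
 * for roughly freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC samples per callback,
 * rounded to a power of two and never below SDL_AUDIO_MIN_BUFFER_SIZE.
 * E.g. at 44100 Hz: 44100 / 30 = 1470 -> about 2048 samples, i.e. roughly
 * 46 ms of audio and around 21 callbacks per second. */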
75 
76 /* Step size for volume control */
77 #define SDL_VOLUME_STEP (SDL_MIX_MAXVOLUME / 50)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83  /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85  /* no AV correction is done if the error is too big */
86 #define AV_NOSYNC_THRESHOLD 10.0
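/* These thresholds drive the sync logic below: per-frame corrections in
 * compute_target_delay() only kick in once the A-V difference exceeds a
 * threshold clamped between AV_SYNC_THRESHOLD_MIN and AV_SYNC_THRESHOLD_MAX,
 * while differences beyond AV_NOSYNC_THRESHOLD are treated as a lost cause and
 * handled by simply resetting the clock (see sync_clock_to_slave()). */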
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99  /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102  /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 static unsigned sws_flags = SWS_BICUBIC;
109 
110 typedef struct MyAVPacketList {
113  int serial;
115 
116 typedef struct PacketQueue {
119  int size;
120  int64_t duration;
122  int serial;
123  SDL_mutex *mutex;
124  SDL_cond *cond;
125 } PacketQueue;
126 
127 #define VIDEO_PICTURE_QUEUE_SIZE 3
128 #define SUBPICTURE_QUEUE_SIZE 16
129 #define SAMPLE_QUEUE_SIZE 9
130 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
131 
132 typedef struct AudioParams {
133  int freq;
134  int channels;
135  int64_t channel_layout;
139 } AudioParams;
140 
141 typedef struct Clock {
142  double pts; /* clock base */
143  double pts_drift; /* clock base minus time at which we updated the clock */
144  double last_updated;
145  double speed;
146  int serial; /* clock is based on a packet with this serial */
147  int paused;
148  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
149 } Clock;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
155  AVSubtitleRect **subrects; /* rescaled subtitle rectangles in yuva */
156  int serial;
157  double pts; /* presentation timestamp for the frame */
158  double duration; /* estimated duration of the frame */
159  int64_t pos; /* byte position of the frame in the input file */
160  SDL_Overlay *bmp;
163  int width;
164  int height;
166 } Frame;
167 
168 typedef struct FrameQueue {
170  int rindex;
171  int windex;
172  int size;
173  int max_size;
176  SDL_mutex *mutex;
177  SDL_cond *cond;
179 } FrameQueue;
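/* FrameQueue is a small ring buffer of decoded Frames protected by mutex/cond:
 * decoder threads block in frame_queue_peek_writable() and commit with
 * frame_queue_push() at windex, while the display side reads at rindex via
 * frame_queue_peek()/frame_queue_next(). With keep_last set, the most recently
 * shown frame is kept around (rindex_shown == 1) so it can be redrawn, for
 * example while the stream is paused. */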
180 
181 enum {
182  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
185 };
186 
187 typedef struct Decoder {
193  int finished;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
198  int64_t next_pts;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
233 
235 
237 
238  double audio_clock;
240  double audio_diff_cum; /* used for AV difference average computation */
249  unsigned int audio_buf_size; /* in bytes */
250  unsigned int audio_buf1_size;
251  int audio_buf_index; /* in bytes */
254  int muted;
256 #if CONFIG_AVFILTER
257  struct AudioParams audio_filter_src;
258 #endif
263 
264  enum ShowMode {
266  } show_mode;
273  int xpos;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287 #if !CONFIG_AVFILTER
289 #endif
292  int eof;
293 
294  char *filename;
296  int step;
297 
298 #if CONFIG_AVFILTER
299  int vfilter_idx;
300  AVFilterContext *in_video_filter; // the first filter in the video chain
301  AVFilterContext *out_video_filter; // the last filter in the video chain
302  AVFilterContext *in_audio_filter; // the first filter in the audio chain
303  AVFilterContext *out_audio_filter; // the last filter in the audio chain
304  AVFilterGraph *agraph; // audio filter graph
305 #endif
306 
308 
310 } VideoState;
311 
312 /* options specified by the user */
314 static const char *input_filename;
315 static const char *window_title;
316 static int fs_screen_width;
317 static int fs_screen_height;
318 static int default_width = 640;
319 static int default_height = 480;
320 static int screen_width = 0;
321 static int screen_height = 0;
322 static int audio_disable;
323 static int video_disable;
324 static int subtitle_disable;
325 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
326 static int seek_by_bytes = -1;
327 static int display_disable;
328 static int show_status = 1;
330 static int64_t start_time = AV_NOPTS_VALUE;
331 static int64_t duration = AV_NOPTS_VALUE;
332 static int fast = 0;
333 static int genpts = 0;
334 static int lowres = 0;
335 static int decoder_reorder_pts = -1;
336 static int autoexit;
337 static int exit_on_keydown;
338 static int exit_on_mousedown;
339 static int loop = 1;
340 static int framedrop = -1;
341 static int infinite_buffer = -1;
342 static enum ShowMode show_mode = SHOW_MODE_NONE;
343 static const char *audio_codec_name;
344 static const char *subtitle_codec_name;
345 static const char *video_codec_name;
346 double rdftspeed = 0.02;
347 static int64_t cursor_last_shown;
348 static int cursor_hidden = 0;
349 #if CONFIG_AVFILTER
350 static const char **vfilters_list = NULL;
351 static int nb_vfilters = 0;
352 static char *afilters = NULL;
353 #endif
354 static int autorotate = 1;
355 
356 /* current context */
357 static int is_full_screen;
358 static int64_t audio_callback_time;
359 
361 
362 #define FF_ALLOC_EVENT (SDL_USEREVENT)
363 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
364 
365 static SDL_Surface *screen;
366 
367 #if CONFIG_AVFILTER
368 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
369 {
370  GROW_ARRAY(vfilters_list, nb_vfilters);
371  vfilters_list[nb_vfilters - 1] = arg;
372  return 0;
373 }
374 #endif
375 
376 static inline
377 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
378  enum AVSampleFormat fmt2, int64_t channel_count2)
379 {
380  /* If channel count == 1, planar and non-planar formats are the same */
381  if (channel_count1 == 1 && channel_count2 == 1)
383  else
384  return channel_count1 != channel_count2 || fmt1 != fmt2;
385 }
386 
387 static inline
388 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
389 {
390  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
391  return channel_layout;
392  else
393  return 0;
394 }
395 
396 static void free_picture(Frame *vp);
397 
399 {
400  MyAVPacketList *pkt1;
401 
402  if (q->abort_request)
403  return -1;
404 
405  pkt1 = av_malloc(sizeof(MyAVPacketList));
406  if (!pkt1)
407  return -1;
408  pkt1->pkt = *pkt;
409  pkt1->next = NULL;
410  if (pkt == &flush_pkt)
411  q->serial++;
412  pkt1->serial = q->serial;
413 
414  if (!q->last_pkt)
415  q->first_pkt = pkt1;
416  else
417  q->last_pkt->next = pkt1;
418  q->last_pkt = pkt1;
419  q->nb_packets++;
420  q->size += pkt1->pkt.size + sizeof(*pkt1);
421  q->duration += pkt1->pkt.duration;
422  /* XXX: should duplicate packet data in DV case */
423  SDL_CondSignal(q->cond);
424  return 0;
425 }
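/* The serial counter is what makes seeking clean: queueing flush_pkt bumps
 * q->serial, and every later packet carries the new value. Decoders, frame
 * queues and clocks all remember the serial of the data they are working on
 * and drop anything whose serial no longer matches the queue, i.e. anything
 * produced before the last flush. */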
426 
428 {
429  int ret;
430 
431  SDL_LockMutex(q->mutex);
432  ret = packet_queue_put_private(q, pkt);
433  SDL_UnlockMutex(q->mutex);
434 
435  if (pkt != &flush_pkt && ret < 0)
436  av_packet_unref(pkt);
437 
438  return ret;
439 }
440 
441 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
442 {
443  AVPacket pkt1, *pkt = &pkt1;
444  av_init_packet(pkt);
445  pkt->data = NULL;
446  pkt->size = 0;
447  pkt->stream_index = stream_index;
448  return packet_queue_put(q, pkt);
449 }
450 
451 /* packet queue handling */
453 {
454  memset(q, 0, sizeof(PacketQueue));
455  q->mutex = SDL_CreateMutex();
456  if (!q->mutex) {
457  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
458  return AVERROR(ENOMEM);
459  }
460  q->cond = SDL_CreateCond();
461  if (!q->cond) {
462  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
463  return AVERROR(ENOMEM);
464  }
465  q->abort_request = 1;
466  return 0;
467 }
468 
470 {
471  MyAVPacketList *pkt, *pkt1;
472 
473  SDL_LockMutex(q->mutex);
474  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
475  pkt1 = pkt->next;
476  av_packet_unref(&pkt->pkt);
477  av_freep(&pkt);
478  }
479  q->last_pkt = NULL;
480  q->first_pkt = NULL;
481  q->nb_packets = 0;
482  q->size = 0;
483  q->duration = 0;
484  SDL_UnlockMutex(q->mutex);
485 }
486 
488 {
490  SDL_DestroyMutex(q->mutex);
491  SDL_DestroyCond(q->cond);
492 }
493 
495 {
496  SDL_LockMutex(q->mutex);
497 
498  q->abort_request = 1;
499 
500  SDL_CondSignal(q->cond);
501 
502  SDL_UnlockMutex(q->mutex);
503 }
504 
506 {
507  SDL_LockMutex(q->mutex);
508  q->abort_request = 0;
509  packet_queue_put_private(q, &flush_pkt);
510  SDL_UnlockMutex(q->mutex);
511 }
512 
513 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
514 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
515 {
516  MyAVPacketList *pkt1;
517  int ret;
518 
519  SDL_LockMutex(q->mutex);
520 
521  for (;;) {
522  if (q->abort_request) {
523  ret = -1;
524  break;
525  }
526 
527  pkt1 = q->first_pkt;
528  if (pkt1) {
529  q->first_pkt = pkt1->next;
530  if (!q->first_pkt)
531  q->last_pkt = NULL;
532  q->nb_packets--;
533  q->size -= pkt1->pkt.size + sizeof(*pkt1);
534  q->duration -= pkt1->pkt.duration;
535  *pkt = pkt1->pkt;
536  if (serial)
537  *serial = pkt1->serial;
538  av_free(pkt1);
539  ret = 1;
540  break;
541  } else if (!block) {
542  ret = 0;
543  break;
544  } else {
545  SDL_CondWait(q->cond, q->mutex);
546  }
547  }
548  SDL_UnlockMutex(q->mutex);
549  return ret;
550 }
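/* Typical consumer usage (see decoder_decode_frame() below): block until a
 * packet arrives and remember its serial, e.g.
 *     if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
 *         return -1;
 * A return value of 0 can only happen in non-blocking mode; -1 means the queue
 * was aborted. */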
551 
552 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
553  memset(d, 0, sizeof(Decoder));
554  d->avctx = avctx;
555  d->queue = queue;
556  d->empty_queue_cond = empty_queue_cond;
558 }
559 
561  int got_frame = 0;
562 
563  do {
564  int ret = -1;
565 
566  if (d->queue->abort_request)
567  return -1;
568 
569  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
570  AVPacket pkt;
571  do {
572  if (d->queue->nb_packets == 0)
573  SDL_CondSignal(d->empty_queue_cond);
574  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
575  return -1;
576  if (pkt.data == flush_pkt.data) {
578  d->finished = 0;
579  d->next_pts = d->start_pts;
580  d->next_pts_tb = d->start_pts_tb;
581  }
582  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
583  av_packet_unref(&d->pkt);
584  d->pkt_temp = d->pkt = pkt;
585  d->packet_pending = 1;
586  }
587 
588  switch (d->avctx->codec_type) {
589  case AVMEDIA_TYPE_VIDEO:
590  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
591  if (got_frame) {
592  if (decoder_reorder_pts == -1) {
593  frame->pts = av_frame_get_best_effort_timestamp(frame);
594  } else if (decoder_reorder_pts) {
595  frame->pts = frame->pkt_pts;
596  } else {
597  frame->pts = frame->pkt_dts;
598  }
599  }
600  break;
601  case AVMEDIA_TYPE_AUDIO:
602  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
603  if (got_frame) {
604  AVRational tb = (AVRational){1, frame->sample_rate};
605  if (frame->pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
607  else if (frame->pkt_pts != AV_NOPTS_VALUE)
608  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
609  else if (d->next_pts != AV_NOPTS_VALUE)
610  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
611  if (frame->pts != AV_NOPTS_VALUE) {
612  d->next_pts = frame->pts + frame->nb_samples;
613  d->next_pts_tb = tb;
614  }
615  }
616  break;
618  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
619  break;
620  }
621 
622  if (ret < 0) {
623  d->packet_pending = 0;
624  } else {
625  d->pkt_temp.dts =
627  if (d->pkt_temp.data) {
629  ret = d->pkt_temp.size;
630  d->pkt_temp.data += ret;
631  d->pkt_temp.size -= ret;
632  if (d->pkt_temp.size <= 0)
633  d->packet_pending = 0;
634  } else {
635  if (!got_frame) {
636  d->packet_pending = 0;
637  d->finished = d->pkt_serial;
638  }
639  }
640  }
641  } while (!got_frame && !d->finished);
642 
643  return got_frame;
644 }
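/* Timestamp handling above: for video, frame->pts falls back from the
 * best-effort timestamp (decoder_reorder_pts == -1, the default) to pkt_pts or
 * pkt_dts depending on decoder_reorder_pts; for audio, frames that arrive
 * without a pts are extrapolated from next_pts, which advances by nb_samples
 * in a 1/sample_rate time base after every decoded frame. */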
645 
646 static void decoder_destroy(Decoder *d) {
647  av_packet_unref(&d->pkt);
649 }
650 
652 {
653  int i;
654  for (i = 0; i < vp->sub.num_rects; i++) {
655  av_freep(&vp->subrects[i]->data[0]);
656  av_freep(&vp->subrects[i]);
657  }
658  av_freep(&vp->subrects);
659  av_frame_unref(vp->frame);
660  avsubtitle_free(&vp->sub);
661 }
662 
663 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
664 {
665  int i;
666  memset(f, 0, sizeof(FrameQueue));
667  if (!(f->mutex = SDL_CreateMutex())) {
668  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
669  return AVERROR(ENOMEM);
670  }
671  if (!(f->cond = SDL_CreateCond())) {
672  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
673  return AVERROR(ENOMEM);
674  }
675  f->pktq = pktq;
676  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
677  f->keep_last = !!keep_last;
678  for (i = 0; i < f->max_size; i++)
679  if (!(f->queue[i].frame = av_frame_alloc()))
680  return AVERROR(ENOMEM);
681  return 0;
682 }
683 
685 {
686  int i;
687  for (i = 0; i < f->max_size; i++) {
688  Frame *vp = &f->queue[i];
690  av_frame_free(&vp->frame);
691  free_picture(vp);
692  }
693  SDL_DestroyMutex(f->mutex);
694  SDL_DestroyCond(f->cond);
695 }
696 
698 {
699  SDL_LockMutex(f->mutex);
700  SDL_CondSignal(f->cond);
701  SDL_UnlockMutex(f->mutex);
702 }
703 
705 {
706  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
707 }
708 
710 {
711  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
712 }
713 
715 {
716  return &f->queue[f->rindex];
717 }
718 
720 {
721  /* wait until we have space to put a new frame */
722  SDL_LockMutex(f->mutex);
723  while (f->size >= f->max_size &&
724  !f->pktq->abort_request) {
725  SDL_CondWait(f->cond, f->mutex);
726  }
727  SDL_UnlockMutex(f->mutex);
728 
729  if (f->pktq->abort_request)
730  return NULL;
731 
732  return &f->queue[f->windex];
733 }
734 
736 {
737  /* wait until we have a readable new frame */
738  SDL_LockMutex(f->mutex);
739  while (f->size - f->rindex_shown <= 0 &&
740  !f->pktq->abort_request) {
741  SDL_CondWait(f->cond, f->mutex);
742  }
743  SDL_UnlockMutex(f->mutex);
744 
745  if (f->pktq->abort_request)
746  return NULL;
747 
748  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
749 }
750 
752 {
753  if (++f->windex == f->max_size)
754  f->windex = 0;
755  SDL_LockMutex(f->mutex);
756  f->size++;
757  SDL_CondSignal(f->cond);
758  SDL_UnlockMutex(f->mutex);
759 }
760 
762 {
763  if (f->keep_last && !f->rindex_shown) {
764  f->rindex_shown = 1;
765  return;
766  }
768  if (++f->rindex == f->max_size)
769  f->rindex = 0;
770  SDL_LockMutex(f->mutex);
771  f->size--;
772  SDL_CondSignal(f->cond);
773  SDL_UnlockMutex(f->mutex);
774 }
775 
776 /* return the number of undisplayed frames in the queue */
778 {
779  return f->size - f->rindex_shown;
780 }
781 
782 /* return last shown position */
784 {
785  Frame *fp = &f->queue[f->rindex];
786  if (f->rindex_shown && fp->serial == f->pktq->serial)
787  return fp->pos;
788  else
789  return -1;
790 }
791 
792 static void decoder_abort(Decoder *d, FrameQueue *fq)
793 {
795  frame_queue_signal(fq);
796  SDL_WaitThread(d->decoder_tid, NULL);
797  d->decoder_tid = NULL;
799 }
800 
801 static inline void fill_rectangle(SDL_Surface *screen,
802  int x, int y, int w, int h, int color, int update)
803 {
804  SDL_Rect rect;
805  rect.x = x;
806  rect.y = y;
807  rect.w = w;
808  rect.h = h;
809  SDL_FillRect(screen, &rect, color);
810  if (update && w > 0 && h > 0)
811  SDL_UpdateRect(screen, x, y, w, h);
812 }
813 
814 /* draw only the border of a rectangle */
815 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
816 {
817  int w1, w2, h1, h2;
818 
819  /* fill the background */
820  w1 = x;
821  if (w1 < 0)
822  w1 = 0;
823  w2 = width - (x + w);
824  if (w2 < 0)
825  w2 = 0;
826  h1 = y;
827  if (h1 < 0)
828  h1 = 0;
829  h2 = height - (y + h);
830  if (h2 < 0)
831  h2 = 0;
833  xleft, ytop,
834  w1, height,
835  color, update);
837  xleft + width - w2, ytop,
838  w2, height,
839  color, update);
841  xleft + w1, ytop,
842  width - w1 - w2, h1,
843  color, update);
845  xleft + w1, ytop + height - h2,
846  width - w1 - w2, h2,
847  color, update);
848 }
849 
850 #define ALPHA_BLEND(a, oldp, newp, s)\
851 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
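/* ALPHA_BLEND mixes an existing sample with a new one using an 8-bit alpha.
 * With s == 0 (the only shift blend_subrect() below actually uses) it reduces
 * to (oldp * (255 - a) + newp * a) / 255, so a == 0 keeps the old pixel,
 * a == 255 replaces it, and e.g. a == 128 gives roughly a 50/50 mix. */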
852 
853 
854 
855 #define BPP 1
856 
857 static void blend_subrect(uint8_t **data, int *linesize, const AVSubtitleRect *rect, int imgw, int imgh)
858 {
859  int x, y, Y, U, V, A;
860  uint8_t *lum, *cb, *cr;
861  int dstx, dsty, dstw, dsth;
862  const AVSubtitleRect *src = rect;
863 
864  dstw = av_clip(rect->w, 0, imgw);
865  dsth = av_clip(rect->h, 0, imgh);
866  dstx = av_clip(rect->x, 0, imgw - dstw);
867  dsty = av_clip(rect->y, 0, imgh - dsth);
868  lum = data[0] + dstx + dsty * linesize[0];
869  cb = data[1] + dstx/2 + (dsty >> 1) * linesize[1];
870  cr = data[2] + dstx/2 + (dsty >> 1) * linesize[2];
871 
872  for (y = 0; y<dsth; y++) {
873  for (x = 0; x<dstw; x++) {
874  Y = src->data[0][x + y*src->linesize[0]];
875  A = src->data[3][x + y*src->linesize[3]];
876  lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
877  lum++;
878  }
879  lum += linesize[0] - dstw;
880  }
881 
882  for (y = 0; y<dsth/2; y++) {
883  for (x = 0; x<dstw/2; x++) {
884  U = src->data[1][x + y*src->linesize[1]];
885  V = src->data[2][x + y*src->linesize[2]];
886  A = src->data[3][2*x + 2*y *src->linesize[3]]
887  + src->data[3][2*x + 1 + 2*y *src->linesize[3]]
888  + src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
889  + src->data[3][2*x + (2*y+1)*src->linesize[3]];
890  cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
891  cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
892  cb++;
893  cr++;
894  }
895  cb += linesize[1] - dstw/2;
896  cr += linesize[2] - dstw/2;
897  }
898 }
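/* The overlay is 4:2:0, so each chroma sample covers a 2x2 block of luma
 * samples; the second loop above therefore sums the four corresponding alpha
 * values and blends U and V with their average (A >> 2). */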
899 
900 static void free_picture(Frame *vp)
901 {
902  if (vp->bmp) {
903  SDL_FreeYUVOverlay(vp->bmp);
904  vp->bmp = NULL;
905  }
906 }
907 
908 static void calculate_display_rect(SDL_Rect *rect,
909  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
910  int pic_width, int pic_height, AVRational pic_sar)
911 {
912  float aspect_ratio;
913  int width, height, x, y;
914 
915  if (pic_sar.num == 0)
916  aspect_ratio = 0;
917  else
918  aspect_ratio = av_q2d(pic_sar);
919 
920  if (aspect_ratio <= 0.0)
921  aspect_ratio = 1.0;
922  aspect_ratio *= (float)pic_width / (float)pic_height;
923 
924  /* XXX: we suppose the screen has a 1.0 pixel ratio */
925  height = scr_height;
926  width = lrint(height * aspect_ratio) & ~1;
927  if (width > scr_width) {
928  width = scr_width;
929  height = lrint(width / aspect_ratio) & ~1;
930  }
931  x = (scr_width - width) / 2;
932  y = (scr_height - height) / 2;
933  rect->x = scr_xleft + x;
934  rect->y = scr_ytop + y;
935  rect->w = FFMAX(width, 1);
936  rect->h = FFMAX(height, 1);
937 }
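/* Example: fitting a 1920x1080 picture with a 1:1 sample aspect ratio into a
 * 640x480 area gives aspect_ratio ~= 1.78; the first guess of 480 * 1.78 ~= 852
 * is wider than the screen, so the rect ends up as 640x360 (rounded down to
 * even values) centered vertically at y = 60. */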
938 
940 {
941  Frame *vp;
942  Frame *sp;
943  SDL_Rect rect;
944  int i;
945 
946  vp = frame_queue_peek_last(&is->pictq);
947  if (vp->bmp) {
948  if (is->subtitle_st) {
949  if (frame_queue_nb_remaining(&is->subpq) > 0) {
950  sp = frame_queue_peek(&is->subpq);
951 
952  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
953  uint8_t *data[4];
954  int linesize[4];
955 
956  SDL_LockYUVOverlay (vp->bmp);
957 
958  data[0] = vp->bmp->pixels[0];
959  data[1] = vp->bmp->pixels[2];
960  data[2] = vp->bmp->pixels[1];
961 
962  linesize[0] = vp->bmp->pitches[0];
963  linesize[1] = vp->bmp->pitches[2];
964  linesize[2] = vp->bmp->pitches[1];
965 
966  for (i = 0; i < sp->sub.num_rects; i++)
967  blend_subrect(data, linesize, sp->subrects[i],
968  vp->bmp->w, vp->bmp->h);
969 
970  SDL_UnlockYUVOverlay (vp->bmp);
971  }
972  }
973  }
974 
975  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
976 
977  SDL_DisplayYUVOverlay(vp->bmp, &rect);
978 
979  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
980  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
981  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
982  is->last_display_rect = rect;
983  }
984  }
985 }
986 
987 static inline int compute_mod(int a, int b)
988 {
989  return a < 0 ? a%b + b : a%b;
990 }
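/* compute_mod() is a modulo that never returns a negative result, which the
 * circular sample_array indexing below relies on: compute_mod(-3, 16) is 13,
 * whereas the plain C expression -3 % 16 would yield -3. */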
991 
993 {
994  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
995  int ch, channels, h, h2, bgcolor, fgcolor;
996  int64_t time_diff;
997  int rdft_bits, nb_freq;
998 
999  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1000  ;
1001  nb_freq = 1 << (rdft_bits - 1);
1002 
1003  /* compute display index : center on currently output samples */
1004  channels = s->audio_tgt.channels;
1005  nb_display_channels = channels;
1006  if (!s->paused) {
1007  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1008  n = 2 * channels;
1009  delay = s->audio_write_buf_size;
1010  delay /= n;
1011 
1012  /* to be more precise, we take into account the time spent since
1013  the last buffer computation */
1014  if (audio_callback_time) {
1015  time_diff = av_gettime_relative() - audio_callback_time;
1016  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1017  }
1018 
1019  delay += 2 * data_used;
1020  if (delay < data_used)
1021  delay = data_used;
1022 
1023  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1024  if (s->show_mode == SHOW_MODE_WAVES) {
1025  h = INT_MIN;
1026  for (i = 0; i < 1000; i += channels) {
1027  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1028  int a = s->sample_array[idx];
1029  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1030  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1031  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1032  int score = a - d;
1033  if (h < score && (b ^ c) < 0) {
1034  h = score;
1035  i_start = idx;
1036  }
1037  }
1038  }
1039 
1040  s->last_i_start = i_start;
1041  } else {
1042  i_start = s->last_i_start;
1043  }
1044 
1045  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1046  if (s->show_mode == SHOW_MODE_WAVES) {
1048  s->xleft, s->ytop, s->width, s->height,
1049  bgcolor, 0);
1050 
1051  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1052 
1053  /* total height for one channel */
1054  h = s->height / nb_display_channels;
1055  /* graph height / 2 */
1056  h2 = (h * 9) / 20;
1057  for (ch = 0; ch < nb_display_channels; ch++) {
1058  i = i_start + ch;
1059  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1060  for (x = 0; x < s->width; x++) {
1061  y = (s->sample_array[i] * h2) >> 15;
1062  if (y < 0) {
1063  y = -y;
1064  ys = y1 - y;
1065  } else {
1066  ys = y1;
1067  }
1069  s->xleft + x, ys, 1, y,
1070  fgcolor, 0);
1071  i += channels;
1072  if (i >= SAMPLE_ARRAY_SIZE)
1073  i -= SAMPLE_ARRAY_SIZE;
1074  }
1075  }
1076 
1077  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1078 
1079  for (ch = 1; ch < nb_display_channels; ch++) {
1080  y = s->ytop + ch * h;
1082  s->xleft, y, s->width, 1,
1083  fgcolor, 0);
1084  }
1085  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1086  } else {
1087  nb_display_channels= FFMIN(nb_display_channels, 2);
1088  if (rdft_bits != s->rdft_bits) {
1089  av_rdft_end(s->rdft);
1090  av_free(s->rdft_data);
1091  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1092  s->rdft_bits = rdft_bits;
1093  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1094  }
1095  if (!s->rdft || !s->rdft_data){
1096  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1097  s->show_mode = SHOW_MODE_WAVES;
1098  } else {
1099  FFTSample *data[2];
1100  for (ch = 0; ch < nb_display_channels; ch++) {
1101  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1102  i = i_start + ch;
1103  for (x = 0; x < 2 * nb_freq; x++) {
1104  double w = (x-nb_freq) * (1.0 / nb_freq);
1105  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1106  i += channels;
1107  if (i >= SAMPLE_ARRAY_SIZE)
1108  i -= SAMPLE_ARRAY_SIZE;
1109  }
1110  av_rdft_calc(s->rdft, data[ch]);
1111  }
1112  /* Least efficient way to do this; we should of course
1113  * directly access it, but it is more than fast enough. */
1114  for (y = 0; y < s->height; y++) {
1115  double w = 1 / sqrt(nb_freq);
1116  int a = sqrt(w * hypot(data[0][2 * y + 0], data[0][2 * y + 1]));
1117  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1118  : a;
1119  a = FFMIN(a, 255);
1120  b = FFMIN(b, 255);
1121  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1122 
1124  s->xpos, s->height-y, 1, 1,
1125  fgcolor, 0);
1126  }
1127  }
1128  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1129  if (!s->paused)
1130  s->xpos++;
1131  if (s->xpos >= s->width)
1132  s->xpos= s->xleft;
1133  }
1134 }
1135 
1136 static void stream_component_close(VideoState *is, int stream_index)
1137 {
1138  AVFormatContext *ic = is->ic;
1139  AVCodecParameters *codecpar;
1140 
1141  if (stream_index < 0 || stream_index >= ic->nb_streams)
1142  return;
1143  codecpar = ic->streams[stream_index]->codecpar;
1144 
1145  switch (codecpar->codec_type) {
1146  case AVMEDIA_TYPE_AUDIO:
1147  decoder_abort(&is->auddec, &is->sampq);
1148  SDL_CloseAudio();
1149  decoder_destroy(&is->auddec);
1150  swr_free(&is->swr_ctx);
1151  av_freep(&is->audio_buf1);
1152  is->audio_buf1_size = 0;
1153  is->audio_buf = NULL;
1154 
1155  if (is->rdft) {
1156  av_rdft_end(is->rdft);
1157  av_freep(&is->rdft_data);
1158  is->rdft = NULL;
1159  is->rdft_bits = 0;
1160  }
1161  break;
1162  case AVMEDIA_TYPE_VIDEO:
1163  decoder_abort(&is->viddec, &is->pictq);
1164  decoder_destroy(&is->viddec);
1165  break;
1166  case AVMEDIA_TYPE_SUBTITLE:
1167  decoder_abort(&is->subdec, &is->subpq);
1168  decoder_destroy(&is->subdec);
1169  break;
1170  default:
1171  break;
1172  }
1173 
1174  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1175  switch (codecpar->codec_type) {
1176  case AVMEDIA_TYPE_AUDIO:
1177  is->audio_st = NULL;
1178  is->audio_stream = -1;
1179  break;
1180  case AVMEDIA_TYPE_VIDEO:
1181  is->video_st = NULL;
1182  is->video_stream = -1;
1183  break;
1184  case AVMEDIA_TYPE_SUBTITLE:
1185  is->subtitle_st = NULL;
1186  is->subtitle_stream = -1;
1187  break;
1188  default:
1189  break;
1190  }
1191 }
1192 
1193 static void stream_close(VideoState *is)
1194 {
1195  /* XXX: use a special url_shutdown call to abort parse cleanly */
1196  is->abort_request = 1;
1197  SDL_WaitThread(is->read_tid, NULL);
1198 
1199  /* close each stream */
1200  if (is->audio_stream >= 0)
1202  if (is->video_stream >= 0)
1204  if (is->subtitle_stream >= 0)
1206 
1207  avformat_close_input(&is->ic);
1208 
1212 
1213  /* free all pictures */
1214  frame_queue_destory(&is->pictq);
1215  frame_queue_destory(&is->sampq);
1216  frame_queue_destory(&is->subpq);
1217  SDL_DestroyCond(is->continue_read_thread);
1218 #if !CONFIG_AVFILTER
1220 #endif
1222  av_free(is->filename);
1223  av_free(is);
1224 }
1225 
1226 static void do_exit(VideoState *is)
1227 {
1228  if (is) {
1229  stream_close(is);
1230  }
1232  uninit_opts();
1233 #if CONFIG_AVFILTER
1234  av_freep(&vfilters_list);
1235 #endif
1237  if (show_status)
1238  printf("\n");
1239  SDL_Quit();
1240  av_log(NULL, AV_LOG_QUIET, "%s", "");
1241  exit(0);
1242 }
1243 
1244 static void sigterm_handler(int sig)
1245 {
1246  exit(123);
1247 }
1248 
1250 {
1251  SDL_Rect rect;
1252  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1253  default_width = rect.w;
1254  default_height = rect.h;
1255 }
1256 
1257 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1258 {
1259  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1260  int w,h;
1261 
1262  if (is_full_screen) flags |= SDL_FULLSCREEN;
1263  else flags |= SDL_RESIZABLE;
1264 
1265  if (vp && vp->width)
1266  set_default_window_size(vp->width, vp->height, vp->sar);
1267 
1269  w = fs_screen_width;
1270  h = fs_screen_height;
1271  } else if (!is_full_screen && screen_width) {
1272  w = screen_width;
1273  h = screen_height;
1274  } else {
1275  w = default_width;
1276  h = default_height;
1277  }
1278  w = FFMIN(16383, w);
1279  if (screen && is->width == screen->w && screen->w == w
1280  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1281  return 0;
1282  screen = SDL_SetVideoMode(w, h, 0, flags);
1283  if (!screen) {
1284  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1285  do_exit(is);
1286  }
1287  if (!window_title)
1289  SDL_WM_SetCaption(window_title, window_title);
1290 
1291  is->width = screen->w;
1292  is->height = screen->h;
1293 
1294  return 0;
1295 }
1296 
1297 /* display the current picture, if any */
1298 static void video_display(VideoState *is)
1299 {
1300  if (!screen)
1301  video_open(is, 0, NULL);
1302  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1303  video_audio_display(is);
1304  else if (is->video_st)
1305  video_image_display(is);
1306 }
1307 
1308 static double get_clock(Clock *c)
1309 {
1310  if (*c->queue_serial != c->serial)
1311  return NAN;
1312  if (c->paused) {
1313  return c->pts;
1314  } else {
1315  double time = av_gettime_relative() / 1000000.0;
1316  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1317  }
1318 }
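/* Substituting pts_drift = pts - last_updated shows that the unpaused case
 * returns pts + (time - last_updated) * speed: the clock advances in real
 * time, scaled by its speed, from the moment set_clock_at() last stamped it. */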
1319 
1320 static void set_clock_at(Clock *c, double pts, int serial, double time)
1321 {
1322  c->pts = pts;
1323  c->last_updated = time;
1324  c->pts_drift = c->pts - time;
1325  c->serial = serial;
1326 }
1327 
1328 static void set_clock(Clock *c, double pts, int serial)
1329 {
1330  double time = av_gettime_relative() / 1000000.0;
1331  set_clock_at(c, pts, serial, time);
1332 }
1333 
1334 static void set_clock_speed(Clock *c, double speed)
1335 {
1336  set_clock(c, get_clock(c), c->serial);
1337  c->speed = speed;
1338 }
1339 
1340 static void init_clock(Clock *c, int *queue_serial)
1341 {
1342  c->speed = 1.0;
1343  c->paused = 0;
1344  c->queue_serial = queue_serial;
1345  set_clock(c, NAN, -1);
1346 }
1347 
1348 static void sync_clock_to_slave(Clock *c, Clock *slave)
1349 {
1350  double clock = get_clock(c);
1351  double slave_clock = get_clock(slave);
1352  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1353  set_clock(c, slave_clock, slave->serial);
1354 }
1355 
1357  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1358  if (is->video_st)
1359  return AV_SYNC_VIDEO_MASTER;
1360  else
1361  return AV_SYNC_AUDIO_MASTER;
1362  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1363  if (is->audio_st)
1364  return AV_SYNC_AUDIO_MASTER;
1365  else
1366  return AV_SYNC_EXTERNAL_CLOCK;
1367  } else {
1368  return AV_SYNC_EXTERNAL_CLOCK;
1369  }
1370 }
1371 
1372 /* get the current master clock value */
1373 static double get_master_clock(VideoState *is)
1374 {
1375  double val;
1376 
1377  switch (get_master_sync_type(is)) {
1378  case AV_SYNC_VIDEO_MASTER:
1379  val = get_clock(&is->vidclk);
1380  break;
1381  case AV_SYNC_AUDIO_MASTER:
1382  val = get_clock(&is->audclk);
1383  break;
1384  default:
1385  val = get_clock(&is->extclk);
1386  break;
1387  }
1388  return val;
1389 }
1390 
1392  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1395  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1398  } else {
1399  double speed = is->extclk.speed;
1400  if (speed != 1.0)
1401  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1402  }
1403 }
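/* For realtime input synced to the external clock this acts as a crude buffer
 * regulator: the clock is slowed towards EXTERNAL_CLOCK_SPEED_MIN while the
 * packet queues are nearly empty, sped up towards EXTERNAL_CLOCK_SPEED_MAX
 * while they are over-full, and otherwise nudged back to 1.0 in
 * EXTERNAL_CLOCK_SPEED_STEP increments. */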
1404 
1405 /* seek in the stream */
1406 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1407 {
1408  if (!is->seek_req) {
1409  is->seek_pos = pos;
1410  is->seek_rel = rel;
1411  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1412  if (seek_by_bytes)
1414  is->seek_req = 1;
1415  SDL_CondSignal(is->continue_read_thread);
1416  }
1417 }
1418 
1419 /* pause or resume the video */
1421 {
1422  if (is->paused) {
1423  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1424  if (is->read_pause_return != AVERROR(ENOSYS)) {
1425  is->vidclk.paused = 0;
1426  }
1427  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1428  }
1429  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1430  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1431 }
1432 
1433 static void toggle_pause(VideoState *is)
1434 {
1435  stream_toggle_pause(is);
1436  is->step = 0;
1437 }
1438 
1439 static void toggle_mute(VideoState *is)
1440 {
1441  is->muted = !is->muted;
1442 }
1443 
1444 static void update_volume(VideoState *is, int sign, int step)
1445 {
1446  is->audio_volume = av_clip(is->audio_volume + sign * step, 0, SDL_MIX_MAXVOLUME);
1447 }
1448 
1450 {
1451  /* if the stream is paused unpause it, then step */
1452  if (is->paused)
1453  stream_toggle_pause(is);
1454  is->step = 1;
1455 }
1456 
1457 static double compute_target_delay(double delay, VideoState *is)
1458 {
1459  double sync_threshold, diff = 0;
1460 
1461  /* update delay to follow master synchronisation source */
1463  /* if video is slave, we try to correct big delays by
1464  duplicating or deleting a frame */
1465  diff = get_clock(&is->vidclk) - get_master_clock(is);
1466 
1467  /* skip or repeat frame. We take into account the
1468  delay to compute the threshold. I still don't know
1469  if it is the best guess */
1470  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1471  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1472  if (diff <= -sync_threshold)
1473  delay = FFMAX(0, delay + diff);
1474  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1475  delay = delay + diff;
1476  else if (diff >= sync_threshold)
1477  delay = 2 * delay;
1478  }
1479  }
1480 
1481  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1482  delay, -diff);
1483 
1484  return delay;
1485 }
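/* Example: with a nominal frame delay of 40 ms the sync threshold clamps to
 * 40 ms as well. If the video clock is 200 ms behind the master (diff = -0.2)
 * the returned delay collapses to 0 and the next frame is shown immediately;
 * if it is 200 ms ahead (diff = +0.2) the delay doubles to 80 ms, effectively
 * repeating the current picture while the master catches up. */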
1486 
1487 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1488  if (vp->serial == nextvp->serial) {
1489  double duration = nextvp->pts - vp->pts;
1490  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1491  return vp->duration;
1492  else
1493  return duration;
1494  } else {
1495  return 0.0;
1496  }
1497 }
1498 
1499 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1500  /* update current video pts */
1501  set_clock(&is->vidclk, pts, serial);
1502  sync_clock_to_slave(&is->extclk, &is->vidclk);
1503 }
1504 
1505 /* called to display each frame */
1506 static void video_refresh(void *opaque, double *remaining_time)
1507 {
1508  VideoState *is = opaque;
1509  double time;
1510 
1511  Frame *sp, *sp2;
1512 
1513  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1515 
1516  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1517  time = av_gettime_relative() / 1000000.0;
1518  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1519  video_display(is);
1520  is->last_vis_time = time;
1521  }
1522  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1523  }
1524 
1525  if (is->video_st) {
1526 retry:
1527  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1528  // nothing to do, no picture to display in the queue
1529  } else {
1530  double last_duration, duration, delay;
1531  Frame *vp, *lastvp;
1532 
1533  /* dequeue the picture */
1534  lastvp = frame_queue_peek_last(&is->pictq);
1535  vp = frame_queue_peek(&is->pictq);
1536 
1537  if (vp->serial != is->videoq.serial) {
1538  frame_queue_next(&is->pictq);
1539  goto retry;
1540  }
1541 
1542  if (lastvp->serial != vp->serial)
1543  is->frame_timer = av_gettime_relative() / 1000000.0;
1544 
1545  if (is->paused)
1546  goto display;
1547 
1548  /* compute nominal last_duration */
1549  last_duration = vp_duration(is, lastvp, vp);
1550  delay = compute_target_delay(last_duration, is);
1551 
1552  time= av_gettime_relative()/1000000.0;
1553  if (time < is->frame_timer + delay) {
1554  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1555  goto display;
1556  }
1557 
1558  is->frame_timer += delay;
1559  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1560  is->frame_timer = time;
1561 
1562  SDL_LockMutex(is->pictq.mutex);
1563  if (!isnan(vp->pts))
1564  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1565  SDL_UnlockMutex(is->pictq.mutex);
1566 
1567  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1568  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1569  duration = vp_duration(is, vp, nextvp);
1570  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1571  is->frame_drops_late++;
1572  frame_queue_next(&is->pictq);
1573  goto retry;
1574  }
1575  }
1576 
1577  if (is->subtitle_st) {
1578  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1579  sp = frame_queue_peek(&is->subpq);
1580 
1581  if (frame_queue_nb_remaining(&is->subpq) > 1)
1582  sp2 = frame_queue_peek_next(&is->subpq);
1583  else
1584  sp2 = NULL;
1585 
1586  if (sp->serial != is->subtitleq.serial
1587  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1588  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1589  {
1590  frame_queue_next(&is->subpq);
1591  } else {
1592  break;
1593  }
1594  }
1595  }
1596 
1597  frame_queue_next(&is->pictq);
1598  is->force_refresh = 1;
1599 
1600  if (is->step && !is->paused)
1601  stream_toggle_pause(is);
1602  }
1603 display:
1604  /* display picture */
1605  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1606  video_display(is);
1607  }
1608  is->force_refresh = 0;
1609  if (show_status) {
1610  static int64_t last_time;
1611  int64_t cur_time;
1612  int aqsize, vqsize, sqsize;
1613  double av_diff;
1614 
1615  cur_time = av_gettime_relative();
1616  if (!last_time || (cur_time - last_time) >= 30000) {
1617  aqsize = 0;
1618  vqsize = 0;
1619  sqsize = 0;
1620  if (is->audio_st)
1621  aqsize = is->audioq.size;
1622  if (is->video_st)
1623  vqsize = is->videoq.size;
1624  if (is->subtitle_st)
1625  sqsize = is->subtitleq.size;
1626  av_diff = 0;
1627  if (is->audio_st && is->video_st)
1628  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1629  else if (is->video_st)
1630  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1631  else if (is->audio_st)
1632  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1634  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1635  get_master_clock(is),
1636  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1637  av_diff,
1639  aqsize / 1024,
1640  vqsize / 1024,
1641  sqsize,
1644  fflush(stdout);
1645  last_time = cur_time;
1646  }
1647  }
1648 }
1649 
1650 /* allocate a picture (this needs to be done in the main thread to avoid
1651  potential locking problems) */
1652 static void alloc_picture(VideoState *is)
1653 {
1654  Frame *vp;
1655  int64_t bufferdiff;
1656 
1657  vp = &is->pictq.queue[is->pictq.windex];
1658 
1659  free_picture(vp);
1660 
1661  video_open(is, 0, vp);
1662 
1663  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1664  SDL_YV12_OVERLAY,
1665  screen);
1666  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1667  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1668  /* SDL allocates a buffer smaller than requested if the video
1669  * overlay hardware is unable to support the requested size. */
1671  "Error: the video system does not support an image\n"
1672  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1673  "to reduce the image size.\n", vp->width, vp->height );
1674  do_exit(is);
1675  }
1676 
1677  SDL_LockMutex(is->pictq.mutex);
1678  vp->allocated = 1;
1679  SDL_CondSignal(is->pictq.cond);
1680  SDL_UnlockMutex(is->pictq.mutex);
1681 }
1682 
1683 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1684  int i, width, height;
1685  Uint8 *p, *maxp;
1686  for (i = 0; i < 3; i++) {
1687  width = bmp->w;
1688  height = bmp->h;
1689  if (i > 0) {
1690  width >>= 1;
1691  height >>= 1;
1692  }
1693  if (bmp->pitches[i] > width) {
1694  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1695  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1696  *(p+1) = *p;
1697  }
1698  }
1699 }
1700 
1701 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1702 {
1703  Frame *vp;
1704 
1705 #if defined(DEBUG_SYNC)
1706  printf("frame_type=%c pts=%0.3f\n",
1707  av_get_picture_type_char(src_frame->pict_type), pts);
1708 #endif
1709 
1710  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1711  return -1;
1712 
1713  vp->sar = src_frame->sample_aspect_ratio;
1714 
1715  /* alloc or resize hardware picture buffer */
1716  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1717  vp->width != src_frame->width ||
1718  vp->height != src_frame->height) {
1719  SDL_Event event;
1720 
1721  vp->allocated = 0;
1722  vp->reallocate = 0;
1723  vp->width = src_frame->width;
1724  vp->height = src_frame->height;
1725 
1726  /* the allocation must be done in the main thread to avoid
1727  locking problems. */
1728  event.type = FF_ALLOC_EVENT;
1729  event.user.data1 = is;
1730  SDL_PushEvent(&event);
1731 
1732  /* wait until the picture is allocated */
1733  SDL_LockMutex(is->pictq.mutex);
1734  while (!vp->allocated && !is->videoq.abort_request) {
1735  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1736  }
1737  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1738  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1739  while (!vp->allocated && !is->abort_request) {
1740  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1741  }
1742  }
1743  SDL_UnlockMutex(is->pictq.mutex);
1744 
1745  if (is->videoq.abort_request)
1746  return -1;
1747  }
1748 
1749  /* if the frame is not skipped, then display it */
1750  if (vp->bmp) {
1751  uint8_t *data[4];
1752  int linesize[4];
1753 
1754  /* get a pointer to the bitmap */
1755  SDL_LockYUVOverlay (vp->bmp);
1756 
1757  data[0] = vp->bmp->pixels[0];
1758  data[1] = vp->bmp->pixels[2];
1759  data[2] = vp->bmp->pixels[1];
1760 
1761  linesize[0] = vp->bmp->pitches[0];
1762  linesize[1] = vp->bmp->pitches[2];
1763  linesize[2] = vp->bmp->pitches[1];
1764 
1765 #if CONFIG_AVFILTER
1766  // FIXME use direct rendering
1767  av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
1768  src_frame->format, vp->width, vp->height);
1769 #else
1770  {
1771  AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0);
1772  if (e) {
1773  const AVClass *class = sws_get_class();
1774  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
1776  int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags);
1777  if (ret < 0)
1778  exit(1);
1779  }
1780  }
1781 
1783  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1785  if (!is->img_convert_ctx) {
1786  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1787  exit(1);
1788  }
1789  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1790  0, vp->height, data, linesize);
1791 #endif
1792  /* workaround SDL PITCH_WORKAROUND */
1794  /* update the bitmap content */
1795  SDL_UnlockYUVOverlay(vp->bmp);
1796 
1797  vp->pts = pts;
1798  vp->duration = duration;
1799  vp->pos = pos;
1800  vp->serial = serial;
1801 
1802  /* now we can update the picture count */
1803  frame_queue_push(&is->pictq);
1804  }
1805  return 0;
1806 }
1807 
1809 {
1810  int got_picture;
1811 
1812  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1813  return -1;
1814 
1815  if (got_picture) {
1816  double dpts = NAN;
1817 
1818  if (frame->pts != AV_NOPTS_VALUE)
1819  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1820 
1821  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1822 
1823  is->viddec_width = frame->width;
1824  is->viddec_height = frame->height;
1825 
1827  if (frame->pts != AV_NOPTS_VALUE) {
1828  double diff = dpts - get_master_clock(is);
1829  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1830  diff - is->frame_last_filter_delay < 0 &&
1831  is->viddec.pkt_serial == is->vidclk.serial &&
1832  is->videoq.nb_packets) {
1833  is->frame_drops_early++;
1834  av_frame_unref(frame);
1835  got_picture = 0;
1836  }
1837  }
1838  }
1839  }
1840 
1841  return got_picture;
1842 }
1843 
1844 #if CONFIG_AVFILTER
1845 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1846  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1847 {
1848  int ret, i;
1849  int nb_filters = graph->nb_filters;
1851 
1852  if (filtergraph) {
1853  outputs = avfilter_inout_alloc();
1854  inputs = avfilter_inout_alloc();
1855  if (!outputs || !inputs) {
1856  ret = AVERROR(ENOMEM);
1857  goto fail;
1858  }
1859 
1860  outputs->name = av_strdup("in");
1861  outputs->filter_ctx = source_ctx;
1862  outputs->pad_idx = 0;
1863  outputs->next = NULL;
1864 
1865  inputs->name = av_strdup("out");
1866  inputs->filter_ctx = sink_ctx;
1867  inputs->pad_idx = 0;
1868  inputs->next = NULL;
1869 
1870  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1871  goto fail;
1872  } else {
1873  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1874  goto fail;
1875  }
1876 
1877  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1878  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1879  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1880 
1881  ret = avfilter_graph_config(graph, NULL);
1882 fail:
1883  avfilter_inout_free(&outputs);
1884  avfilter_inout_free(&inputs);
1885  return ret;
1886 }
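/* configure_filtergraph() is shared by the video and audio chains: when a
 * filter description is given it is parsed between the "in" (source) and "out"
 * (sink) pads, otherwise source and sink are linked directly. For example, a
 * call like configure_filtergraph(graph, "hflip,vflip", filt_src, filt_out)
 * would insert a horizontal and a vertical flip between the buffer source and
 * the buffer sink. */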
1887 
1888 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1889 {
1890  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1891  char sws_flags_str[512] = "";
1892  char buffersrc_args[256];
1893  int ret;
1894  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1895  AVCodecParameters *codecpar = is->video_st->codecpar;
1896  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1897  AVDictionaryEntry *e = NULL;
1898 
1899  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1900  if (!strcmp(e->key, "sws_flags")) {
1901  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1902  } else
1903  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1904  }
1905  if (strlen(sws_flags_str))
1906  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1907 
1908  graph->scale_sws_opts = av_strdup(sws_flags_str);
1909 
1910  snprintf(buffersrc_args, sizeof(buffersrc_args),
1911  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1912  frame->width, frame->height, frame->format,
1914  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1915  if (fr.num && fr.den)
1916  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1917 
1918  if ((ret = avfilter_graph_create_filter(&filt_src,
1919  avfilter_get_by_name("buffer"),
1920  "ffplay_buffer", buffersrc_args, NULL,
1921  graph)) < 0)
1922  goto fail;
1923 
1924  ret = avfilter_graph_create_filter(&filt_out,
1925  avfilter_get_by_name("buffersink"),
1926  "ffplay_buffersink", NULL, NULL, graph);
1927  if (ret < 0)
1928  goto fail;
1929 
1930  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1931  goto fail;
1932 
1933  last_filter = filt_out;
1934 
1935 /* Note: this macro adds a filter before the last added filter, so the
1936  * processing order of the filters is reversed */
1937 #define INSERT_FILT(name, arg) do { \
1938  AVFilterContext *filt_ctx; \
1939  \
1940  ret = avfilter_graph_create_filter(&filt_ctx, \
1941  avfilter_get_by_name(name), \
1942  "ffplay_" name, arg, NULL, graph); \
1943  if (ret < 0) \
1944  goto fail; \
1945  \
1946  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1947  if (ret < 0) \
1948  goto fail; \
1949  \
1950  last_filter = filt_ctx; \
1951 } while (0)
1952 
1953  /* SDL YUV code does not handle odd width/height for some driver
1954  * combinations, therefore we crop the picture to an even width/height. */
1955  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1956 
1957  if (autorotate) {
1958  double theta = get_rotation(is->video_st);
1959 
1960  if (fabs(theta - 90) < 1.0) {
1961  INSERT_FILT("transpose", "clock");
1962  } else if (fabs(theta - 180) < 1.0) {
1963  INSERT_FILT("hflip", NULL);
1964  INSERT_FILT("vflip", NULL);
1965  } else if (fabs(theta - 270) < 1.0) {
1966  INSERT_FILT("transpose", "cclock");
1967  } else if (fabs(theta) > 1.0) {
1968  char rotate_buf[64];
1969  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1970  INSERT_FILT("rotate", rotate_buf);
1971  }
1972  }
1973 
1974  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1975  goto fail;
1976 
1977  is->in_video_filter = filt_src;
1978  is->out_video_filter = filt_out;
1979 
1980 fail:
1981  return ret;
1982 }
1983 
1984 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1985 {
1987  int sample_rates[2] = { 0, -1 };
1988  int64_t channel_layouts[2] = { 0, -1 };
1989  int channels[2] = { 0, -1 };
1990  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1991  char aresample_swr_opts[512] = "";
1992  AVDictionaryEntry *e = NULL;
1993  char asrc_args[256];
1994  int ret;
1995 
1996  avfilter_graph_free(&is->agraph);
1997  if (!(is->agraph = avfilter_graph_alloc()))
1998  return AVERROR(ENOMEM);
1999 
2000  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2001  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2002  if (strlen(aresample_swr_opts))
2003  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2004  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2005 
2006  ret = snprintf(asrc_args, sizeof(asrc_args),
2007  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2008  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2009  is->audio_filter_src.channels,
2010  1, is->audio_filter_src.freq);
2011  if (is->audio_filter_src.channel_layout)
2012  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2013  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2014 
2015  ret = avfilter_graph_create_filter(&filt_asrc,
2016  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2017  asrc_args, NULL, is->agraph);
2018  if (ret < 0)
2019  goto end;
2020 
2021 
2022  ret = avfilter_graph_create_filter(&filt_asink,
2023  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2024  NULL, NULL, is->agraph);
2025  if (ret < 0)
2026  goto end;
2027 
2028  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2029  goto end;
2030  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2031  goto end;
2032 
2033  if (force_output_format) {
2034  channel_layouts[0] = is->audio_tgt.channel_layout;
2035  channels [0] = is->audio_tgt.channels;
2036  sample_rates [0] = is->audio_tgt.freq;
2037  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2038  goto end;
2039  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2040  goto end;
2041  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2042  goto end;
2043  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2044  goto end;
2045  }
2046 
2047 
2048  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2049  goto end;
2050 
2051  is->in_audio_filter = filt_asrc;
2052  is->out_audio_filter = filt_asink;
2053 
2054 end:
2055  if (ret < 0)
2056  avfilter_graph_free(&is->agraph);
2057  return ret;
2058 }
2059 #endif /* CONFIG_AVFILTER */
2060 
2061 static int audio_thread(void *arg)
2062 {
2063  VideoState *is = arg;
2064  AVFrame *frame = av_frame_alloc();
2065  Frame *af;
2066 #if CONFIG_AVFILTER
2067  int last_serial = -1;
2068  int64_t dec_channel_layout;
2069  int reconfigure;
2070 #endif
2071  int got_frame = 0;
2072  AVRational tb;
2073  int ret = 0;
2074 
2075  if (!frame)
2076  return AVERROR(ENOMEM);
2077 
2078  do {
2079  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2080  goto the_end;
2081 
2082  if (got_frame) {
2083  tb = (AVRational){1, frame->sample_rate};
2084 
2085 #if CONFIG_AVFILTER
2086  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2087 
2088  reconfigure =
2089  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2090  frame->format, av_frame_get_channels(frame)) ||
2091  is->audio_filter_src.channel_layout != dec_channel_layout ||
2092  is->audio_filter_src.freq != frame->sample_rate ||
2093  is->auddec.pkt_serial != last_serial;
2094 
2095  if (reconfigure) {
2096  char buf1[1024], buf2[1024];
2097  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2098  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2099  av_log(NULL, AV_LOG_DEBUG,
2100  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2101  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2102  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2103 
2104  is->audio_filter_src.fmt = frame->format;
2105  is->audio_filter_src.channels = av_frame_get_channels(frame);
2106  is->audio_filter_src.channel_layout = dec_channel_layout;
2107  is->audio_filter_src.freq = frame->sample_rate;
2108  last_serial = is->auddec.pkt_serial;
2109 
2110  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2111  goto the_end;
2112  }
2113 
2114  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2115  goto the_end;
2116 
2117  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2118  tb = is->out_audio_filter->inputs[0]->time_base;
2119 #endif
2120  if (!(af = frame_queue_peek_writable(&is->sampq)))
2121  goto the_end;
2122 
2123  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2124  af->pos = av_frame_get_pkt_pos(frame);
2125  af->serial = is->auddec.pkt_serial;
2126  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2127 
2128  av_frame_move_ref(af->frame, frame);
2129  frame_queue_push(&is->sampq);
2130 
2131 #if CONFIG_AVFILTER
2132  if (is->audioq.serial != is->auddec.pkt_serial)
2133  break;
2134  }
2135  if (ret == AVERROR_EOF)
2136  is->auddec.finished = is->auddec.pkt_serial;
2137 #endif
2138  }
2139  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2140  the_end:
2141 #if CONFIG_AVFILTER
2142  avfilter_graph_free(&is->agraph);
2143 #endif
2144  av_frame_free(&frame);
2145  return ret;
2146 }
2147 
2148 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2149 {
2150  packet_queue_start(d->queue);
2151  d->decoder_tid = SDL_CreateThread(fn, arg);
2152  if (!d->decoder_tid) {
2153  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2154  return AVERROR(ENOMEM);
2155  }
2156  return 0;
2157 }
2158 
2159 static int video_thread(void *arg)
2160 {
2161  VideoState *is = arg;
2162  AVFrame *frame = av_frame_alloc();
2163  double pts;
2164  double duration;
2165  int ret;
2166  AVRational tb = is->video_st->time_base;
2167  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2168 
2169 #if CONFIG_AVFILTER
2170  AVFilterGraph *graph = avfilter_graph_alloc();
2171  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2172  int last_w = 0;
2173  int last_h = 0;
2174  enum AVPixelFormat last_format = -2;
2175  int last_serial = -1;
2176  int last_vfilter_idx = 0;
2177  if (!graph) {
2178  av_frame_free(&frame);
2179  return AVERROR(ENOMEM);
2180  }
2181 
2182 #endif
2183 
2184  if (!frame) {
2185 #if CONFIG_AVFILTER
2186  avfilter_graph_free(&graph);
2187 #endif
2188  return AVERROR(ENOMEM);
2189  }
2190 
2191  for (;;) {
2192  ret = get_video_frame(is, frame);
2193  if (ret < 0)
2194  goto the_end;
2195  if (!ret)
2196  continue;
2197 
2198 #if CONFIG_AVFILTER
2199  if ( last_w != frame->width
2200  || last_h != frame->height
2201  || last_format != frame->format
2202  || last_serial != is->viddec.pkt_serial
2203  || last_vfilter_idx != is->vfilter_idx) {
2204  av_log(NULL, AV_LOG_DEBUG,
2205  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2206  last_w, last_h,
2207  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2208  frame->width, frame->height,
2209  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2210  avfilter_graph_free(&graph);
2211  graph = avfilter_graph_alloc();
2212  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2213  SDL_Event event;
2214  event.type = FF_QUIT_EVENT;
2215  event.user.data1 = is;
2216  SDL_PushEvent(&event);
2217  goto the_end;
2218  }
2219  filt_in = is->in_video_filter;
2220  filt_out = is->out_video_filter;
2221  last_w = frame->width;
2222  last_h = frame->height;
2223  last_format = frame->format;
2224  last_serial = is->viddec.pkt_serial;
2225  last_vfilter_idx = is->vfilter_idx;
2226  frame_rate = filt_out->inputs[0]->frame_rate;
2227  }
2228 
2229  ret = av_buffersrc_add_frame(filt_in, frame);
2230  if (ret < 0)
2231  goto the_end;
2232 
2233  while (ret >= 0) {
2234  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2235 
2236  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2237  if (ret < 0) {
2238  if (ret == AVERROR_EOF)
2239  is->viddec.finished = is->viddec.pkt_serial;
2240  ret = 0;
2241  break;
2242  }
2243 
2244  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2245  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2246  is->frame_last_filter_delay = 0;
2247  tb = filt_out->inputs[0]->time_base;
2248 #endif
2249  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2250  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2251  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2252  av_frame_unref(frame);
2253 #if CONFIG_AVFILTER
2254  }
2255 #endif
2256 
2257  if (ret < 0)
2258  goto the_end;
2259  }
2260  the_end:
2261 #if CONFIG_AVFILTER
2262  avfilter_graph_free(&graph);
2263 #endif
2264  av_frame_free(&frame);
2265  return 0;
2266 }
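/* Illustrative note (not part of the original source): the per-frame duration passed to
 * queue_picture() above is the inverse of the guessed frame rate. For an assumed
 * frame_rate of 30000/1001 (NTSC), av_q2d((AVRational){frame_rate.den, frame_rate.num})
 * = 1001/30000 ≈ 0.0334 s, which becomes the nominal display time of each picture. */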
2267 
2268 static int subtitle_thread(void *arg)
2269 {
2270  VideoState *is = arg;
2271  Frame *sp;
2272  int got_subtitle;
2273  double pts;
2274  int i;
2275 
2276  for (;;) {
2277  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2278  return 0;
2279 
2280  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2281  break;
2282 
2283  pts = 0;
2284 
2285  if (got_subtitle && sp->sub.format == 0) {
2286  if (sp->sub.pts != AV_NOPTS_VALUE)
2287  pts = sp->sub.pts / (double)AV_TIME_BASE;
2288  sp->pts = pts;
2289  sp->serial = is->subdec.pkt_serial;
2290  if (!(sp->subrects = av_mallocz_array(sp->sub.num_rects, sizeof(AVSubtitleRect*)))) {
2291  av_log(NULL, AV_LOG_FATAL, "Cannot allocate subrects\n");
2292  exit(1);
2293  }
2294 
2295  for (i = 0; i < sp->sub.num_rects; i++)
2296  {
2297  int in_w = sp->sub.rects[i]->w;
2298  int in_h = sp->sub.rects[i]->h;
2299  int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width;
2300  int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height;
2301  int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w;
2302  int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h;
2303 
2304  if (!(sp->subrects[i] = av_mallocz(sizeof(AVSubtitleRect))) ||
2305  av_image_alloc(sp->subrects[i]->data, sp->subrects[i]->linesize, out_w, out_h, AV_PIX_FMT_YUVA420P, 16) < 0) {
2306  av_log(NULL, AV_LOG_FATAL, "Cannot allocate subtitle data\n");
2307  exit(1);
2308  }
2309 
2310  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
2311  in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
2312  AV_PIX_FMT_YUVA420P, SWS_BICUBIC, NULL, NULL, NULL);
2313  if (!is->sub_convert_ctx) {
2314  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
2315  exit(1);
2316  }
2317  sws_scale(is->sub_convert_ctx,
2318  (void*)sp->sub.rects[i]->data, sp->sub.rects[i]->linesize,
2319  0, in_h, sp->subrects[i]->data, sp->subrects[i]->linesize);
2320 
2321  sp->subrects[i]->w = out_w;
2322  sp->subrects[i]->h = out_h;
2323  sp->subrects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
2324  sp->subrects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
2325  }
2326 
2327  /* now we can update the picture count */
2328  frame_queue_push(&is->subpq);
2329  } else if (got_subtitle) {
2330  avsubtitle_free(&sp->sub);
2331  }
2332  }
2333  return 0;
2334 }
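/* Illustrative note (not part of the original source): the rect scaling above maps
 * subtitle coordinates into the decoded video's pixel space. Assuming, for example, a
 * bitmap subtitle rendered for a 720x576 canvas (subw x subh) displayed over 1440x1152
 * decoded video (viddec_width x viddec_height), a 300x60 rect becomes
 * out_w = 300 * 1440 / 720 = 600 and out_h = 60 * 1152 / 576 = 120, and its x/y offsets
 * are scaled by the same out/in ratios. The concrete sizes are assumed for the example. */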
2335 
2336 /* copy samples for viewing in editor window */
2337 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2338 {
2339  int size, len;
2340 
2341  size = samples_size / sizeof(short);
2342  while (size > 0) {
2344  if (len > size)
2345  len = size;
2346  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2347  samples += len;
2348  is->sample_array_index += len;
2350  is->sample_array_index = 0;
2351  size -= len;
2352  }
2353 }
2354 
2355 /* return the wanted number of samples to get better sync if sync_type is video
2356  * or external master clock */
2357 static int synchronize_audio(VideoState *is, int nb_samples)
2358 {
2359  int wanted_nb_samples = nb_samples;
2360 
2361  /* if not master, then we try to remove or add samples to correct the clock */
2362  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2363  double diff, avg_diff;
2364  int min_nb_samples, max_nb_samples;
2365 
2366  diff = get_clock(&is->audclk) - get_master_clock(is);
2367 
2368  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2369  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2370  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2371  /* not enough measures to have a correct estimate */
2372  is->audio_diff_avg_count++;
2373  } else {
2374  /* estimate the A-V difference */
2375  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2376 
2377  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2378  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2379  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2380  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2381  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2382  }
2383  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2384  diff, avg_diff, wanted_nb_samples - nb_samples,
2385  is->audio_clock, is->audio_diff_threshold);
2386  }
2387  } else {
2388  /* the difference is too big: it may be due to initial PTS errors, so
2389  we reset the A-V filter */
2390  is->audio_diff_avg_count = 0;
2391  is->audio_diff_cum = 0;
2392  }
2393  }
2394 
2395  return wanted_nb_samples;
2396 }
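/* Illustrative note (not part of the original source): with SAMPLE_CORRECTION_PERCENT_MAX
 * at 10, a 1024-sample frame may only be stretched or shrunk to between
 * 1024 * 90 / 100 = 921 and 1024 * 110 / 100 = 1126 samples per call. For an assumed
 * 0.02 s clock error at 48000 Hz, the raw request would be
 * 1024 + (int)(0.02 * 48000) = 1984 samples, which av_clip() limits to 1126, so the
 * correction is spread over several frames instead of producing an audible jump. */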
2397 
2398 /**
2399  * Decode one audio frame and return its uncompressed size.
2400  *
2401  * The processed audio frame is decoded, converted if required, and
2402  * stored in is->audio_buf, with size in bytes given by the return
2403  * value.
2404  */
2405 static int audio_decode_frame(VideoState *is)
2406 {
2407  int data_size, resampled_data_size;
2408  int64_t dec_channel_layout;
2409  av_unused double audio_clock0;
2410  int wanted_nb_samples;
2411  Frame *af;
2412 
2413  if (is->paused)
2414  return -1;
2415 
2416  do {
2417 #if defined(_WIN32)
2418  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2420  return -1;
2421  av_usleep (1000);
2422  }
2423 #endif
2424  if (!(af = frame_queue_peek_readable(&is->sampq)))
2425  return -1;
2426  frame_queue_next(&is->sampq);
2427  } while (af->serial != is->audioq.serial);
2428 
2429  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2430  af->frame->nb_samples,
2431  af->frame->format, 1);
2432 
2433  dec_channel_layout =
2434  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2435  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2436  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2437 
2438  if (af->frame->format != is->audio_src.fmt ||
2439  dec_channel_layout != is->audio_src.channel_layout ||
2440  af->frame->sample_rate != is->audio_src.freq ||
2441  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2442  swr_free(&is->swr_ctx);
2443  is->swr_ctx = swr_alloc_set_opts(NULL,
2444  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2445  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2446  0, NULL);
2447  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2448  av_log(NULL, AV_LOG_ERROR,
2449  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2450  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2451  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2452  swr_free(&is->swr_ctx);
2453  return -1;
2454  }
2455  is->audio_src.channel_layout = dec_channel_layout;
2456  is->audio_src.channels = av_frame_get_channels(af->frame);
2457  is->audio_src.freq = af->frame->sample_rate;
2458  is->audio_src.fmt = af->frame->format;
2459  }
2460 
2461  if (is->swr_ctx) {
2462  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2463  uint8_t **out = &is->audio_buf1;
2464  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2465  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2466  int len2;
2467  if (out_size < 0) {
2468  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2469  return -1;
2470  }
2471  if (wanted_nb_samples != af->frame->nb_samples) {
2472  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2473  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2474  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2475  return -1;
2476  }
2477  }
2478  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2479  if (!is->audio_buf1)
2480  return AVERROR(ENOMEM);
2481  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2482  if (len2 < 0) {
2483  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2484  return -1;
2485  }
2486  if (len2 == out_count) {
2487  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2488  if (swr_init(is->swr_ctx) < 0)
2489  swr_free(&is->swr_ctx);
2490  }
2491  is->audio_buf = is->audio_buf1;
2492  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2493  } else {
2494  is->audio_buf = af->frame->data[0];
2495  resampled_data_size = data_size;
2496  }
2497 
2498  audio_clock0 = is->audio_clock;
2499  /* update the audio clock with the pts */
2500  if (!isnan(af->pts))
2501  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2502  else
2503  is->audio_clock = NAN;
2504  is->audio_clock_serial = af->serial;
2505 #ifdef DEBUG
2506  {
2507  static double last_clock;
2508  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2509  is->audio_clock - last_clock,
2510  is->audio_clock, audio_clock0);
2511  last_clock = is->audio_clock;
2512  }
2513 #endif
2514  return resampled_data_size;
2515 }
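/* Illustrative note (not part of the original source): the resampler output buffer above
 * is sized with headroom: out_count = wanted_nb_samples * out_rate / in_rate + 256.
 * Converting an assumed 1126-sample frame from 44100 Hz to 48000 Hz gives
 * 1126 * 48000 / 44100 + 256 = 1225 + 256 = 1481 samples, so swr_convert() normally
 * cannot fill the buffer completely; len2 == out_count therefore signals that the buffer
 * was probably too small and the swr context is reinitialized. */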
2516 
2517 /* prepare a new audio buffer */
2518 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2519 {
2520  VideoState *is = opaque;
2521  int audio_size, len1;
2522 
2524 
2525  while (len > 0) {
2526  if (is->audio_buf_index >= is->audio_buf_size) {
2527  audio_size = audio_decode_frame(is);
2528  if (audio_size < 0) {
2529  /* if error, just output silence */
2530  is->audio_buf = NULL;
2532  } else {
2533  if (is->show_mode != SHOW_MODE_VIDEO)
2534  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2535  is->audio_buf_size = audio_size;
2536  }
2537  is->audio_buf_index = 0;
2538  }
2539  len1 = is->audio_buf_size - is->audio_buf_index;
2540  if (len1 > len)
2541  len1 = len;
2542  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2543  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2544  else {
2545  memset(stream, 0, len1);
2546  if (!is->muted && is->audio_buf)
2547  SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
2548  }
2549  len -= len1;
2550  stream += len1;
2551  is->audio_buf_index += len1;
2552  }
2554  /* Let's assume the audio driver that is used by SDL has two periods. */
2555  if (!isnan(is->audio_clock)) {
2557  sync_clock_to_slave(&is->extclk, &is->audclk);
2558  }
2559 }
2560 
2561 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2562 {
2563  SDL_AudioSpec wanted_spec, spec;
2564  const char *env;
2565  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2566  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2567  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2568 
2569  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2570  if (env) {
2571  wanted_nb_channels = atoi(env);
2572  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2573  }
2574  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2575  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2576  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2577  }
2578  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2579  wanted_spec.channels = wanted_nb_channels;
2580  wanted_spec.freq = wanted_sample_rate;
2581  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2582  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2583  return -1;
2584  }
2585  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2586  next_sample_rate_idx--;
2587  wanted_spec.format = AUDIO_S16SYS;
2588  wanted_spec.silence = 0;
2589  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2590  wanted_spec.callback = sdl_audio_callback;
2591  wanted_spec.userdata = opaque;
2592  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2593  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2594  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2595  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2596  if (!wanted_spec.channels) {
2597  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2598  wanted_spec.channels = wanted_nb_channels;
2599  if (!wanted_spec.freq) {
2600  av_log(NULL, AV_LOG_ERROR,
2601  "No more combinations to try, audio open failed\n");
2602  return -1;
2603  }
2604  }
2605  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2606  }
2607  if (spec.format != AUDIO_S16SYS) {
2608  av_log(NULL, AV_LOG_ERROR,
2609  "SDL advised audio format %d is not supported!\n", spec.format);
2610  return -1;
2611  }
2612  if (spec.channels != wanted_spec.channels) {
2613  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2614  if (!wanted_channel_layout) {
2615  av_log(NULL, AV_LOG_ERROR,
2616  "SDL advised channel count %d is not supported!\n", spec.channels);
2617  return -1;
2618  }
2619  }
2620 
2621  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2622  audio_hw_params->freq = spec.freq;
2623  audio_hw_params->channel_layout = wanted_channel_layout;
2624  audio_hw_params->channels = spec.channels;
2625  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2626  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2627  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2628  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2629  return -1;
2630  }
2631  return spec.size;
2632 }
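/* Illustrative note (not part of the original source): the wanted_spec.samples value above
 * keeps the callback rate near SDL_AUDIO_MAX_CALLBACKS_PER_SEC. For an assumed 44100 Hz
 * device, 44100 / 30 = 1470 and 2 << av_log2(1470) = 2 << 10 = 2048 samples per callback,
 * i.e. roughly 46 ms of audio per period, never less than SDL_AUDIO_MIN_BUFFER_SIZE. */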
2633 
2634 /* open a given stream. Return 0 if OK */
2635 static int stream_component_open(VideoState *is, int stream_index)
2636 {
2637  AVFormatContext *ic = is->ic;
2638  AVCodecContext *avctx;
2639  AVCodec *codec;
2640  const char *forced_codec_name = NULL;
2641  AVDictionary *opts = NULL;
2642  AVDictionaryEntry *t = NULL;
2643  int sample_rate, nb_channels;
2644  int64_t channel_layout;
2645  int ret = 0;
2646  int stream_lowres = lowres;
2647 
2648  if (stream_index < 0 || stream_index >= ic->nb_streams)
2649  return -1;
2650 
2651  avctx = avcodec_alloc_context3(NULL);
2652  if (!avctx)
2653  return AVERROR(ENOMEM);
2654 
2655  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2656  if (ret < 0)
2657  goto fail;
2658  av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
2659 
2660  codec = avcodec_find_decoder(avctx->codec_id);
2661 
2662  switch(avctx->codec_type){
2663  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2664  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2665  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2666  }
2667  if (forced_codec_name)
2668  codec = avcodec_find_decoder_by_name(forced_codec_name);
2669  if (!codec) {
2670  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2671  "No codec could be found with name '%s'\n", forced_codec_name);
2672  else av_log(NULL, AV_LOG_WARNING,
2673  "No codec could be found with id %d\n", avctx->codec_id);
2674  ret = AVERROR(EINVAL);
2675  goto fail;
2676  }
2677 
2678  avctx->codec_id = codec->id;
2679  if(stream_lowres > av_codec_get_max_lowres(codec)){
2680  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2681  av_codec_get_max_lowres(codec));
2682  stream_lowres = av_codec_get_max_lowres(codec);
2683  }
2684  av_codec_set_lowres(avctx, stream_lowres);
2685 
2686 #if FF_API_EMU_EDGE
2687  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2688 #endif
2689  if (fast)
2690  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2691 #if FF_API_EMU_EDGE
2692  if(codec->capabilities & AV_CODEC_CAP_DR1)
2693  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2694 #endif
2695 
2696  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2697  if (!av_dict_get(opts, "threads", NULL, 0))
2698  av_dict_set(&opts, "threads", "auto", 0);
2699  if (stream_lowres)
2700  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2701  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2702  av_dict_set(&opts, "refcounted_frames", "1", 0);
2703  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2704  goto fail;
2705  }
2706  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2707  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2708  ret = AVERROR_OPTION_NOT_FOUND;
2709  goto fail;
2710  }
2711 
2712  is->eof = 0;
2713  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2714  switch (avctx->codec_type) {
2715  case AVMEDIA_TYPE_AUDIO:
2716 #if CONFIG_AVFILTER
2717  {
2718  AVFilterLink *link;
2719 
2720  is->audio_filter_src.freq = avctx->sample_rate;
2721  is->audio_filter_src.channels = avctx->channels;
2722  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2723  is->audio_filter_src.fmt = avctx->sample_fmt;
2724  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2725  goto fail;
2726  link = is->out_audio_filter->inputs[0];
2727  sample_rate = link->sample_rate;
2728  nb_channels = link->channels;
2729  channel_layout = link->channel_layout;
2730  }
2731 #else
2732  sample_rate = avctx->sample_rate;
2733  nb_channels = avctx->channels;
2734  channel_layout = avctx->channel_layout;
2735 #endif
2736 
2737  /* prepare audio output */
2738  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2739  goto fail;
2740  is->audio_hw_buf_size = ret;
2741  is->audio_src = is->audio_tgt;
2742  is->audio_buf_size = 0;
2743  is->audio_buf_index = 0;
2744 
2745  /* init averaging filter */
2746  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2747  is->audio_diff_avg_count = 0;
2748  /* since we do not have a precise enough audio FIFO fullness,
2749  we correct audio sync only if larger than this threshold */
2750  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2751 
2752  is->audio_stream = stream_index;
2753  is->audio_st = ic->streams[stream_index];
2754 
2755  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2757  is->auddec.start_pts = is->audio_st->start_time;
2759  }
2760  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2761  goto out;
2762  SDL_PauseAudio(0);
2763  break;
2764  case AVMEDIA_TYPE_VIDEO:
2765  is->video_stream = stream_index;
2766  is->video_st = ic->streams[stream_index];
2767 
2768  is->viddec_width = avctx->width;
2769  is->viddec_height = avctx->height;
2770 
2771  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2772  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2773  goto out;
2774  is->queue_attachments_req = 1;
2775  break;
2776  case AVMEDIA_TYPE_SUBTITLE:
2777  is->subtitle_stream = stream_index;
2778  is->subtitle_st = ic->streams[stream_index];
2779 
2780  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2781  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2782  goto out;
2783  break;
2784  default:
2785  break;
2786  }
2787  goto out;
2788 
2789 fail:
2790  avcodec_free_context(&avctx);
2791 out:
2792  av_dict_free(&opts);
2793 
2794  return ret;
2795 }
2796 
2797 static int decode_interrupt_cb(void *ctx)
2798 {
2799  VideoState *is = ctx;
2800  return is->abort_request;
2801 }
2802 
2803 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2804  return stream_id < 0 ||
2805  queue->abort_request ||
2806  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2807  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2808 }
2809 
2810 static int is_realtime(AVFormatContext *s)
2811 {
2812  if( !strcmp(s->iformat->name, "rtp")
2813  || !strcmp(s->iformat->name, "rtsp")
2814  || !strcmp(s->iformat->name, "sdp")
2815  )
2816  return 1;
2817 
2818  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2819  || !strncmp(s->filename, "udp:", 4)
2820  )
2821  )
2822  return 1;
2823  return 0;
2824 }
2825 
2826 /* this thread gets the stream from the disk or the network */
2827 static int read_thread(void *arg)
2828 {
2829  VideoState *is = arg;
2830  AVFormatContext *ic = NULL;
2831  int err, i, ret;
2832  int st_index[AVMEDIA_TYPE_NB];
2833  AVPacket pkt1, *pkt = &pkt1;
2834  int64_t stream_start_time;
2835  int pkt_in_play_range = 0;
2836  AVDictionaryEntry *t;
2837  AVDictionary **opts;
2838  int orig_nb_streams;
2839  SDL_mutex *wait_mutex = SDL_CreateMutex();
2840  int scan_all_pmts_set = 0;
2841  int64_t pkt_ts;
2842 
2843  if (!wait_mutex) {
2844  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2845  ret = AVERROR(ENOMEM);
2846  goto fail;
2847  }
2848 
2849  memset(st_index, -1, sizeof(st_index));
2850  is->last_video_stream = is->video_stream = -1;
2851  is->last_audio_stream = is->audio_stream = -1;
2852  is->last_subtitle_stream = is->subtitle_stream = -1;
2853  is->eof = 0;
2854 
2855  ic = avformat_alloc_context();
2856  if (!ic) {
2857  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2858  ret = AVERROR(ENOMEM);
2859  goto fail;
2860  }
2861  ic->interrupt_callback.callback = decode_interrupt_cb;
2862  ic->interrupt_callback.opaque = is;
2863  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2864  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2865  scan_all_pmts_set = 1;
2866  }
2867  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2868  if (err < 0) {
2869  print_error(is->filename, err);
2870  ret = -1;
2871  goto fail;
2872  }
2873  if (scan_all_pmts_set)
2874  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2875 
2876  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2877  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2878  ret = AVERROR_OPTION_NOT_FOUND;
2879  goto fail;
2880  }
2881  is->ic = ic;
2882 
2883  if (genpts)
2884  ic->flags |= AVFMT_FLAG_GENPTS;
2885 
2887 
2888  opts = setup_find_stream_info_opts(ic, codec_opts);
2889  orig_nb_streams = ic->nb_streams;
2890 
2891  err = avformat_find_stream_info(ic, opts);
2892 
2893  for (i = 0; i < orig_nb_streams; i++)
2894  av_dict_free(&opts[i]);
2895  av_freep(&opts);
2896 
2897  if (err < 0) {
2898  av_log(NULL, AV_LOG_WARNING,
2899  "%s: could not find codec parameters\n", is->filename);
2900  ret = -1;
2901  goto fail;
2902  }
2903 
2904  if (ic->pb)
2905  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2906 
2907  if (seek_by_bytes < 0)
2908  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2909 
2910  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2911 
2912  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2913  window_title = av_asprintf("%s - %s", t->value, input_filename);
2914 
2915  /* if seeking requested, we execute it */
2916  if (start_time != AV_NOPTS_VALUE) {
2917  int64_t timestamp;
2918 
2919  timestamp = start_time;
2920  /* add the stream start time */
2921  if (ic->start_time != AV_NOPTS_VALUE)
2922  timestamp += ic->start_time;
2923  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2924  if (ret < 0) {
2925  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2926  is->filename, (double)timestamp / AV_TIME_BASE);
2927  }
2928  }
2929 
2930  is->realtime = is_realtime(ic);
2931 
2932  if (show_status)
2933  av_dump_format(ic, 0, is->filename, 0);
2934 
2935  for (i = 0; i < ic->nb_streams; i++) {
2936  AVStream *st = ic->streams[i];
2937  enum AVMediaType type = st->codecpar->codec_type;
2938  st->discard = AVDISCARD_ALL;
2939  if (wanted_stream_spec[type] && st_index[type] == -1)
2940  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2941  st_index[type] = i;
2942  }
2943  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2944  if (wanted_stream_spec[i] && st_index[i] == -1) {
2945  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2946  st_index[i] = INT_MAX;
2947  }
2948  }
2949 
2950  if (!video_disable)
2951  st_index[AVMEDIA_TYPE_VIDEO] =
2952  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2953  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2954  if (!audio_disable)
2955  st_index[AVMEDIA_TYPE_AUDIO] =
2956  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2957  st_index[AVMEDIA_TYPE_AUDIO],
2958  st_index[AVMEDIA_TYPE_VIDEO],
2959  NULL, 0);
2960  if (!subtitle_disable)
2961  st_index[AVMEDIA_TYPE_SUBTITLE] =
2962  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2963  st_index[AVMEDIA_TYPE_SUBTITLE],
2964  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2965  st_index[AVMEDIA_TYPE_AUDIO] :
2966  st_index[AVMEDIA_TYPE_VIDEO]),
2967  NULL, 0);
2968 
2969  is->show_mode = show_mode;
2970  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2971  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2972  AVCodecParameters *codecpar = st->codecpar;
2973  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2974  if (codecpar->width)
2975  set_default_window_size(codecpar->width, codecpar->height, sar);
2976  }
2977 
2978  /* open the streams */
2979  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2980  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2981  }
2982 
2983  ret = -1;
2984  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2985  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2986  }
2987  if (is->show_mode == SHOW_MODE_NONE)
2988  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2989 
2990  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2991  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2992  }
2993 
2994  if (is->video_stream < 0 && is->audio_stream < 0) {
2995  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2996  is->filename);
2997  ret = -1;
2998  goto fail;
2999  }
3000 
3001  if (infinite_buffer < 0 && is->realtime)
3002  infinite_buffer = 1;
3003 
3004  for (;;) {
3005  if (is->abort_request)
3006  break;
3007  if (is->paused != is->last_paused) {
3008  is->last_paused = is->paused;
3009  if (is->paused)
3010  is->read_pause_return = av_read_pause(ic);
3011  else
3012  av_read_play(ic);
3013  }
3014 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3015  if (is->paused &&
3016  (!strcmp(ic->iformat->name, "rtsp") ||
3017  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3018  /* wait 10 ms to avoid trying to get another packet */
3019  /* XXX: horrible */
3020  SDL_Delay(10);
3021  continue;
3022  }
3023 #endif
3024  if (is->seek_req) {
3025  int64_t seek_target = is->seek_pos;
3026  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3027  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3028 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3029 // of the seek_pos/seek_rel variables
3030 
3031  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3032  if (ret < 0) {
3033  av_log(NULL, AV_LOG_ERROR,
3034  "%s: error while seeking\n", is->ic->filename);
3035  } else {
3036  if (is->audio_stream >= 0) {
3037  packet_queue_flush(&is->audioq);
3038  packet_queue_put(&is->audioq, &flush_pkt);
3039  }
3040  if (is->subtitle_stream >= 0) {
3041  packet_queue_flush(&is->subtitleq);
3042  packet_queue_put(&is->subtitleq, &flush_pkt);
3043  }
3044  if (is->video_stream >= 0) {
3045  packet_queue_flush(&is->videoq);
3046  packet_queue_put(&is->videoq, &flush_pkt);
3047  }
3048  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3049  set_clock(&is->extclk, NAN, 0);
3050  } else {
3051  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3052  }
3053  }
3054  is->seek_req = 0;
3055  is->queue_attachments_req = 1;
3056  is->eof = 0;
3057  if (is->paused)
3058  step_to_next_frame(is);
3059  }
3060  if (is->queue_attachments_req) {
3061  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3062  AVPacket copy;
3063  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3064  goto fail;
3065  packet_queue_put(&is->videoq, &copy);
3067  }
3068  is->queue_attachments_req = 0;
3069  }
3070 
3071  /* if the queues are full, no need to read more */
3072  if (infinite_buffer<1 &&
3073  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3074  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3075  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3076  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3077  /* wait 10 ms */
3078  SDL_LockMutex(wait_mutex);
3079  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3080  SDL_UnlockMutex(wait_mutex);
3081  continue;
3082  }
3083  if (!is->paused &&
3084  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3085  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3086  if (loop != 1 && (!loop || --loop)) {
3087  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3088  } else if (autoexit) {
3089  ret = AVERROR_EOF;
3090  goto fail;
3091  }
3092  }
3093  ret = av_read_frame(ic, pkt);
3094  if (ret < 0) {
3095  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3096  if (is->video_stream >= 0)
3097  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3098  if (is->audio_stream >= 0)
3099  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3100  if (is->subtitle_stream >= 0)
3101  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3102  is->eof = 1;
3103  }
3104  if (ic->pb && ic->pb->error)
3105  break;
3106  SDL_LockMutex(wait_mutex);
3107  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3108  SDL_UnlockMutex(wait_mutex);
3109  continue;
3110  } else {
3111  is->eof = 0;
3112  }
3113  /* check if packet is in play range specified by user, then queue, otherwise discard */
3114  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3115  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3116  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3117  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3118  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3119  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3120  <= ((double)duration / 1000000);
3121  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3122  packet_queue_put(&is->audioq, pkt);
3123  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3124  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3125  packet_queue_put(&is->videoq, pkt);
3126  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3127  packet_queue_put(&is->subtitleq, pkt);
3128  } else {
3129  av_packet_unref(pkt);
3130  }
3131  }
3132 
3133  ret = 0;
3134  fail:
3135  if (ic && !is->ic)
3136  avformat_close_input(&ic);
3137 
3138  if (ret != 0) {
3139  SDL_Event event;
3140 
3141  event.type = FF_QUIT_EVENT;
3142  event.user.data1 = is;
3143  SDL_PushEvent(&event);
3144  }
3145  SDL_DestroyMutex(wait_mutex);
3146  return 0;
3147 }
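/* Illustrative note (not part of the original source): the pkt_in_play_range test in the
 * read loop above rescales each packet timestamp to seconds and compares it against the
 * -t duration. Assuming -ss 60 -t 10 on a stream with time_base 1/90000 and stream
 * start_time 0, a packet with pts 6480000 corresponds to 6480000 * (1/90000) - 60 = 12 s
 * past the requested start point, which is greater than 10, so the packet is discarded
 * instead of being queued. The option values and timestamps are assumed for the example. */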
3148 
3149 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3150 {
3151  VideoState *is;
3152 
3153  is = av_mallocz(sizeof(VideoState));
3154  if (!is)
3155  return NULL;
3156  is->filename = av_strdup(filename);
3157  if (!is->filename)
3158  goto fail;
3159  is->iformat = iformat;
3160  is->ytop = 0;
3161  is->xleft = 0;
3162 
3163  /* start video display */
3164  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3165  goto fail;
3166  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3167  goto fail;
3168  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3169  goto fail;
3170 
3171  if (packet_queue_init(&is->videoq) < 0 ||
3172  packet_queue_init(&is->audioq) < 0 ||
3173  packet_queue_init(&is->subtitleq) < 0)
3174  goto fail;
3175 
3176  if (!(is->continue_read_thread = SDL_CreateCond())) {
3177  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3178  goto fail;
3179  }
3180 
3181  init_clock(&is->vidclk, &is->videoq.serial);
3182  init_clock(&is->audclk, &is->audioq.serial);
3183  init_clock(&is->extclk, &is->extclk.serial);
3184  is->audio_clock_serial = -1;
3185  is->audio_volume = SDL_MIX_MAXVOLUME;
3186  is->muted = 0;
3187  is->av_sync_type = av_sync_type;
3188  is->read_tid = SDL_CreateThread(read_thread, is);
3189  if (!is->read_tid) {
3190  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3191 fail:
3192  stream_close(is);
3193  return NULL;
3194  }
3195  return is;
3196 }
3197 
3198 static void stream_cycle_channel(VideoState *is, int codec_type)
3199 {
3200  AVFormatContext *ic = is->ic;
3201  int start_index, stream_index;
3202  int old_index;
3203  AVStream *st;
3204  AVProgram *p = NULL;
3205  int nb_streams = is->ic->nb_streams;
3206 
3207  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3208  start_index = is->last_video_stream;
3209  old_index = is->video_stream;
3210  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3211  start_index = is->last_audio_stream;
3212  old_index = is->audio_stream;
3213  } else {
3214  start_index = is->last_subtitle_stream;
3215  old_index = is->subtitle_stream;
3216  }
3217  stream_index = start_index;
3218 
3219  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3220  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3221  if (p) {
3222  nb_streams = p->nb_stream_indexes;
3223  for (start_index = 0; start_index < nb_streams; start_index++)
3224  if (p->stream_index[start_index] == stream_index)
3225  break;
3226  if (start_index == nb_streams)
3227  start_index = -1;
3228  stream_index = start_index;
3229  }
3230  }
3231 
3232  for (;;) {
3233  if (++stream_index >= nb_streams)
3234  {
3235  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3236  {
3237  stream_index = -1;
3238  is->last_subtitle_stream = -1;
3239  goto the_end;
3240  }
3241  if (start_index == -1)
3242  return;
3243  stream_index = 0;
3244  }
3245  if (stream_index == start_index)
3246  return;
3247  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3248  if (st->codecpar->codec_type == codec_type) {
3249  /* check that parameters are OK */
3250  switch (codec_type) {
3251  case AVMEDIA_TYPE_AUDIO:
3252  if (st->codecpar->sample_rate != 0 &&
3253  st->codecpar->channels != 0)
3254  goto the_end;
3255  break;
3256  case AVMEDIA_TYPE_VIDEO:
3257  case AVMEDIA_TYPE_SUBTITLE:
3258  goto the_end;
3259  default:
3260  break;
3261  }
3262  }
3263  }
3264  the_end:
3265  if (p && stream_index != -1)
3266  stream_index = p->stream_index[stream_index];
3267  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3268  av_get_media_type_string(codec_type),
3269  old_index,
3270  stream_index);
3271 
3272  stream_component_close(is, old_index);
3273  stream_component_open(is, stream_index);
3274 }
3275 
3276 
3277 static void toggle_full_screen(VideoState *is)
3278 {
3279 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3280  /* OS X needs to reallocate the SDL overlays */
3281  int i;
3282  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3283  is->pictq.queue[i].reallocate = 1;
3284 #endif
3285  is_full_screen = !is_full_screen;
3286  video_open(is, 1, NULL);
3287 }
3288 
3289 static void toggle_audio_display(VideoState *is)
3290 {
3291  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3292  int next = is->show_mode;
3293  do {
3294  next = (next + 1) % SHOW_MODE_NB;
3295  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3296  if (is->show_mode != next) {
3297  fill_rectangle(screen,
3298  is->xleft, is->ytop, is->width, is->height,
3299  bgcolor, 1);
3300  is->force_refresh = 1;
3301  is->show_mode = next;
3302  }
3303 }
3304 
3305 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3306  double remaining_time = 0.0;
3307  SDL_PumpEvents();
3308  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3310  SDL_ShowCursor(0);
3311  cursor_hidden = 1;
3312  }
3313  if (remaining_time > 0.0)
3314  av_usleep((int64_t)(remaining_time * 1000000.0));
3315  remaining_time = REFRESH_RATE;
3316  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3317  video_refresh(is, &remaining_time);
3318  SDL_PumpEvents();
3319  }
3320 }
3321 
3322 static void seek_chapter(VideoState *is, int incr)
3323 {
3324  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3325  int i;
3326 
3327  if (!is->ic->nb_chapters)
3328  return;
3329 
3330  /* find the current chapter */
3331  for (i = 0; i < is->ic->nb_chapters; i++) {
3332  AVChapter *ch = is->ic->chapters[i];
3333  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3334  i--;
3335  break;
3336  }
3337  }
3338 
3339  i += incr;
3340  i = FFMAX(i, 0);
3341  if (i >= is->ic->nb_chapters)
3342  return;
3343 
3344  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3345  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3346  AV_TIME_BASE_Q), 0, 0);
3347 }
3348 
3349 /* handle an event sent by the GUI */
3350 static void event_loop(VideoState *cur_stream)
3351 {
3352  SDL_Event event;
3353  double incr, pos, frac;
3354 
3355  for (;;) {
3356  double x;
3357  refresh_loop_wait_event(cur_stream, &event);
3358  switch (event.type) {
3359  case SDL_KEYDOWN:
3360  if (exit_on_keydown) {
3361  do_exit(cur_stream);
3362  break;
3363  }
3364  switch (event.key.keysym.sym) {
3365  case SDLK_ESCAPE:
3366  case SDLK_q:
3367  do_exit(cur_stream);
3368  break;
3369  case SDLK_f:
3370  toggle_full_screen(cur_stream);
3371  cur_stream->force_refresh = 1;
3372  break;
3373  case SDLK_p:
3374  case SDLK_SPACE:
3375  toggle_pause(cur_stream);
3376  break;
3377  case SDLK_m:
3378  toggle_mute(cur_stream);
3379  break;
3380  case SDLK_KP_MULTIPLY:
3381  case SDLK_0:
3382  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3383  break;
3384  case SDLK_KP_DIVIDE:
3385  case SDLK_9:
3386  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3387  break;
3388  case SDLK_s: // S: Step to next frame
3389  step_to_next_frame(cur_stream);
3390  break;
3391  case SDLK_a:
3392  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3393  break;
3394  case SDLK_v:
3395  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3396  break;
3397  case SDLK_c:
3398  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3399  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3400  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3401  break;
3402  case SDLK_t:
3403  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3404  break;
3405  case SDLK_w:
3406 #if CONFIG_AVFILTER
3407  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3408  if (++cur_stream->vfilter_idx >= nb_vfilters)
3409  cur_stream->vfilter_idx = 0;
3410  } else {
3411  cur_stream->vfilter_idx = 0;
3412  toggle_audio_display(cur_stream);
3413  }
3414 #else
3415  toggle_audio_display(cur_stream);
3416 #endif
3417  break;
3418  case SDLK_PAGEUP:
3419  if (cur_stream->ic->nb_chapters <= 1) {
3420  incr = 600.0;
3421  goto do_seek;
3422  }
3423  seek_chapter(cur_stream, 1);
3424  break;
3425  case SDLK_PAGEDOWN:
3426  if (cur_stream->ic->nb_chapters <= 1) {
3427  incr = -600.0;
3428  goto do_seek;
3429  }
3430  seek_chapter(cur_stream, -1);
3431  break;
3432  case SDLK_LEFT:
3433  incr = -10.0;
3434  goto do_seek;
3435  case SDLK_RIGHT:
3436  incr = 10.0;
3437  goto do_seek;
3438  case SDLK_UP:
3439  incr = 60.0;
3440  goto do_seek;
3441  case SDLK_DOWN:
3442  incr = -60.0;
3443  do_seek:
3444  if (seek_by_bytes) {
3445  pos = -1;
3446  if (pos < 0 && cur_stream->video_stream >= 0)
3447  pos = frame_queue_last_pos(&cur_stream->pictq);
3448  if (pos < 0 && cur_stream->audio_stream >= 0)
3449  pos = frame_queue_last_pos(&cur_stream->sampq);
3450  if (pos < 0)
3451  pos = avio_tell(cur_stream->ic->pb);
3452  if (cur_stream->ic->bit_rate)
3453  incr *= cur_stream->ic->bit_rate / 8.0;
3454  else
3455  incr *= 180000.0;
3456  pos += incr;
3457  stream_seek(cur_stream, pos, incr, 1);
3458  } else {
3459  pos = get_master_clock(cur_stream);
3460  if (isnan(pos))
3461  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3462  pos += incr;
3463  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3464  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3465  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3466  }
3467  break;
3468  default:
3469  break;
3470  }
3471  break;
3472  case SDL_VIDEOEXPOSE:
3473  cur_stream->force_refresh = 1;
3474  break;
3475  case SDL_MOUSEBUTTONDOWN:
3476  if (exit_on_mousedown) {
3477  do_exit(cur_stream);
3478  break;
3479  }
3480  if (event.button.button == SDL_BUTTON_LEFT) {
3481  static int64_t last_mouse_left_click = 0;
3482  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3483  toggle_full_screen(cur_stream);
3484  cur_stream->force_refresh = 1;
3485  last_mouse_left_click = 0;
3486  } else {
3487  last_mouse_left_click = av_gettime_relative();
3488  }
3489  }
3490  case SDL_MOUSEMOTION:
3491  if (cursor_hidden) {
3492  SDL_ShowCursor(1);
3493  cursor_hidden = 0;
3494  }
3496  if (event.type == SDL_MOUSEBUTTONDOWN) {
3497  if (event.button.button != SDL_BUTTON_RIGHT)
3498  break;
3499  x = event.button.x;
3500  } else {
3501  if (!(event.motion.state & SDL_BUTTON_RMASK))
3502  break;
3503  x = event.motion.x;
3504  }
3505  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3506  uint64_t size = avio_size(cur_stream->ic->pb);
3507  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3508  } else {
3509  int64_t ts;
3510  int ns, hh, mm, ss;
3511  int tns, thh, tmm, tss;
3512  tns = cur_stream->ic->duration / 1000000LL;
3513  thh = tns / 3600;
3514  tmm = (tns % 3600) / 60;
3515  tss = (tns % 60);
3516  frac = x / cur_stream->width;
3517  ns = frac * tns;
3518  hh = ns / 3600;
3519  mm = (ns % 3600) / 60;
3520  ss = (ns % 60);
3521  av_log(NULL, AV_LOG_INFO,
3522  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3523  hh, mm, ss, thh, tmm, tss);
3524  ts = frac * cur_stream->ic->duration;
3525  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3526  ts += cur_stream->ic->start_time;
3527  stream_seek(cur_stream, ts, 0, 0);
3528  }
3529  break;
3530  case SDL_VIDEORESIZE:
3531  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3532  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3533  if (!screen) {
3534  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3535  do_exit(cur_stream);
3536  }
3537  screen_width = cur_stream->width = screen->w;
3538  screen_height = cur_stream->height = screen->h;
3539  cur_stream->force_refresh = 1;
3540  break;
3541  case SDL_QUIT:
3542  case FF_QUIT_EVENT:
3543  do_exit(cur_stream);
3544  break;
3545  case FF_ALLOC_EVENT:
3546  alloc_picture(event.user.data1);
3547  break;
3548  default:
3549  break;
3550  }
3551  }
3552 }
3553 
3554 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3555 {
3556  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3557  return opt_default(NULL, "video_size", arg);
3558 }
3559 
3560 static int opt_width(void *optctx, const char *opt, const char *arg)
3561 {
3562  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3563  return 0;
3564 }
3565 
3566 static int opt_height(void *optctx, const char *opt, const char *arg)
3567 {
3568  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3569  return 0;
3570 }
3571 
3572 static int opt_format(void *optctx, const char *opt, const char *arg)
3573 {
3574  file_iformat = av_find_input_format(arg);
3575  if (!file_iformat) {
3576  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3577  return AVERROR(EINVAL);
3578  }
3579  return 0;
3580 }
3581 
3582 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3583 {
3584  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3585  return opt_default(NULL, "pixel_format", arg);
3586 }
3587 
3588 static int opt_sync(void *optctx, const char *opt, const char *arg)
3589 {
3590  if (!strcmp(arg, "audio"))
3591  av_sync_type = AV_SYNC_AUDIO_MASTER;
3592  else if (!strcmp(arg, "video"))
3593  av_sync_type = AV_SYNC_VIDEO_MASTER;
3594  else if (!strcmp(arg, "ext"))
3595  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3596  else {
3597  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3598  exit(1);
3599  }
3600  return 0;
3601 }
3602 
3603 static int opt_seek(void *optctx, const char *opt, const char *arg)
3604 {
3605  start_time = parse_time_or_die(opt, arg, 1);
3606  return 0;
3607 }
3608 
3609 static int opt_duration(void *optctx, const char *opt, const char *arg)
3610 {
3611  duration = parse_time_or_die(opt, arg, 1);
3612  return 0;
3613 }
3614 
3615 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3616 {
3617  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3618  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3619  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3620  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3621  return 0;
3622 }
3623 
3624 static void opt_input_file(void *optctx, const char *filename)
3625 {
3626  if (input_filename) {
3627  av_log(NULL, AV_LOG_FATAL,
3628  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3629  filename, input_filename);
3630  exit(1);
3631  }
3632  if (!strcmp(filename, "-"))
3633  filename = "pipe:";
3634  input_filename = filename;
3635 }
3636 
3637 static int opt_codec(void *optctx, const char *opt, const char *arg)
3638 {
3639  const char *spec = strchr(opt, ':');
3640  if (!spec) {
3641  av_log(NULL, AV_LOG_ERROR,
3642  "No media specifier was specified in '%s' in option '%s'\n",
3643  arg, opt);
3644  return AVERROR(EINVAL);
3645  }
3646  spec++;
3647  switch (spec[0]) {
3648  case 'a' : audio_codec_name = arg; break;
3649  case 's' : subtitle_codec_name = arg; break;
3650  case 'v' : video_codec_name = arg; break;
3651  default:
3652  av_log(NULL, AV_LOG_ERROR,
3653  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3654  return AVERROR(EINVAL);
3655  }
3656  return 0;
3657 }
3658 
3659 static int dummy;
3660 
3661 static const OptionDef options[] = {
3662 #include "cmdutils_common_opts.h"
3663  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3664  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3665  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3666  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3667  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3668  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3669  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3670  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3671  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3672  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3673  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3674  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3675  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3676  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3677  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3678  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3679  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3680  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3681  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3682  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3683  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3684  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3685  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3686  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3687  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3688  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3689  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3690  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3691  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3692 #if CONFIG_AVFILTER
3693  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3694  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3695 #endif
3696  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3697  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3698  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3699  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3700  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3701  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3702  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3703  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3704  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3705  { NULL, },
3706 };
3707 
3708 static void show_usage(void)
3709 {
3710  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3711  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3712  av_log(NULL, AV_LOG_INFO, "\n");
3713 }
3714 
3715 void show_help_default(const char *opt, const char *arg)
3716 {
3718  show_usage();
3719  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3720  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3721  printf("\n");
3722  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3723  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3724 #if !CONFIG_AVFILTER
3725  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3726 #else
3727  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3728 #endif
3729  printf("\nWhile playing:\n"
3730  "q, ESC quit\n"
3731  "f toggle full screen\n"
3732  "p, SPC pause\n"
3733  "m toggle mute\n"
3734  "9, 0 decrease and increase volume respectively\n"
3735  "/, * decrease and increase volume respectively\n"
3736  "a cycle audio channel in the current program\n"
3737  "v cycle video channel\n"
3738  "t cycle subtitle channel in the current program\n"
3739  "c cycle program\n"
3740  "w cycle video filters or show modes\n"
3741  "s activate frame-step mode\n"
3742  "left/right seek backward/forward 10 seconds\n"
3743  "down/up seek backward/forward 1 minute\n"
3744  "page down/page up seek backward/forward 10 minutes\n"
3745  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3746  "left double-click toggle full screen\n"
3747  );
3748 }
3749 
3750 static int lockmgr(void **mtx, enum AVLockOp op)
3751 {
3752  switch(op) {
3753  case AV_LOCK_CREATE:
3754  *mtx = SDL_CreateMutex();
3755  if(!*mtx) {
3756  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3757  return 1;
3758  }
3759  return 0;
3760  case AV_LOCK_OBTAIN:
3761  return !!SDL_LockMutex(*mtx);
3762  case AV_LOCK_RELEASE:
3763  return !!SDL_UnlockMutex(*mtx);
3764  case AV_LOCK_DESTROY:
3765  SDL_DestroyMutex(*mtx);
3766  return 0;
3767  }
3768  return 1;
3769 }
3770 
3771 /* Called from the main */
3772 int main(int argc, char **argv)
3773 {
3774  int flags;
3775  VideoState *is;
3776  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3777  char alsa_bufsize[] = "SDL_AUDIO_ALSA_SET_BUFFER_SIZE=1";
3778 
3779  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3780  parse_loglevel(argc, argv, options);
3781 
3782  /* register all codecs, demux and protocols */
3783 #if CONFIG_AVDEVICE
3784  avdevice_register_all();
3785 #endif
3786 #if CONFIG_AVFILTER
3787  avfilter_register_all();
3788 #endif
3789  av_register_all();
3790  avformat_network_init();
3791 
3792  init_opts();
3793 
3794  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3795  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3796 
3797  show_banner(argc, argv, options);
3798 
3799  parse_options(NULL, argc, argv, options, opt_input_file);
3800 
3801  if (!input_filename) {
3802  show_usage();
3803  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3804  av_log(NULL, AV_LOG_FATAL,
3805  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3806  exit(1);
3807  }
3808 
3809  if (display_disable) {
3810  video_disable = 1;
3811  }
3812  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3813  if (audio_disable)
3814  flags &= ~SDL_INIT_AUDIO;
3815  else {
3816  /* Try to work around an occasional ALSA buffer underflow issue when the
3817  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3818  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3819  SDL_putenv(alsa_bufsize);
3820  }
3821  if (display_disable)
3822  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3823 #if !defined(_WIN32) && !defined(__APPLE__)
3824  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3825 #endif
3826  if (SDL_Init (flags)) {
3827  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3828  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3829  exit(1);
3830  }
3831 
3832  if (!display_disable) {
3833  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3834  fs_screen_width = vi->current_w;
3835  fs_screen_height = vi->current_h;
3836  }
3837 
3838  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3839  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3840  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3841 
3842  SDL_EnableKeyRepeat(SDL_DEFAULT_REPEAT_DELAY, SDL_DEFAULT_REPEAT_INTERVAL);
3843 
3844  if (av_lockmgr_register(lockmgr)) {
3845  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3846  do_exit(NULL);
3847  }
3848 
3849  av_init_packet(&flush_pkt);
3850  flush_pkt.data = (uint8_t *)&flush_pkt;
3851 
3852  is = stream_open(input_filename, file_iformat);
3853  if (!is) {
3854  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3855  do_exit(NULL);
3856  }
3857 
3858  event_loop(is);
3859 
3860  /* never returns */
3861 
3862  return 0;
3863 }
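
Lines 3849-3850 above rely on a sentinel-packet idiom: flush_pkt's data pointer is set to the packet's own address, a value no demuxed packet can ever carry, so packet-queue consumers can detect a flush request by pointer identity rather than by inspecting payload. The sketch below illustrates that idiom in isolation using only public libavcodec calls; flush_pkt_init and pkt_is_flush are illustrative names and are not part of ffplay.c.

/* Minimal sketch of the sentinel-packet idiom (illustrative, not ffplay.c code). */
#include <libavcodec/avcodec.h>

static AVPacket flush_pkt;

static void flush_pkt_init(void)
{
    av_init_packet(&flush_pkt);
    /* Point the packet's data at its own address: impossible for a real
     * demuxed packet, so it serves as an unambiguous flush marker. */
    flush_pkt.data = (uint8_t *)&flush_pkt;
    flush_pkt.size = 0;
}

static int pkt_is_flush(const AVPacket *pkt)
{
    /* Compare by pointer identity, never by contents. */
    return pkt->data == flush_pkt.data;
}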