ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/eval.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/imgutils.h"
39 #include "libavutil/dict.h"
40 #include "libavutil/parseutils.h"
41 #include "libavutil/samplefmt.h"
42 #include "libavutil/avassert.h"
43 #include "libavutil/time.h"
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswscale/swscale.h"
47 #include "libavutil/opt.h"
48 #include "libavcodec/avfft.h"
50 
51 #if CONFIG_AVFILTER
52 # include "libavfilter/avcodec.h"
53 # include "libavfilter/avfilter.h"
54 # include "libavfilter/buffersink.h"
55 # include "libavfilter/buffersrc.h"
56 #endif
57 
58 #include <SDL.h>
59 #include <SDL_thread.h>
60 
61 #include "cmdutils.h"
62 
63 #include <assert.h>
64 
65 const char program_name[] = "ffplay";
66 const int program_birth_year = 2003;
67 
68 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
69 #define MIN_FRAMES 5
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
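Taken together, these two values bound the SDL audio buffer: large enough that the callback fires at most about SDL_AUDIO_MAX_CALLBACKS_PER_SEC times per second, but never smaller than SDL_AUDIO_MIN_BUFFER_SIZE samples. A minimal sketch of that sizing rule (the real computation happens in audio_open(), outside this excerpt; the helper name below is purely illustrative), assuming a 48 kHz output device:

    #include "libavutil/common.h"   /* FFMAX, av_log2 */

    /* illustrative helper, not part of ffplay.c */
    static int sdl_audio_buffer_samples(int freq)
    {
        /* round freq / max-callbacks up to a power of two, but keep the minimum */
        return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                     2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
    }
    /* sdl_audio_buffer_samples(48000) == 2048, i.e. roughly 23 callbacks per second */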
75 
76 /* no AV sync correction is done if below the minimum AV sync threshold */
77 #define AV_SYNC_THRESHOLD_MIN 0.04
78 /* AV sync correction is done if above the maximum AV sync threshold */
79 #define AV_SYNC_THRESHOLD_MAX 0.1
80 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
81 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
82 /* no AV correction is done if the error is too big */
83 #define AV_NOSYNC_THRESHOLD 10.0
84 
85 /* maximum audio speed change to get correct sync */
86 #define SAMPLE_CORRECTION_PERCENT_MAX 10
87 
88 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
89 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
90 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
91 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
92 
93 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
94 #define AUDIO_DIFF_AVG_NB 20
95 
96 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
97 #define REFRESH_RATE 0.01
98 
99 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
100 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
101 #define SAMPLE_ARRAY_SIZE (8 * 65536)
102 
103 #define CURSOR_HIDE_DELAY 1000000
104 
105 static int64_t sws_flags = SWS_BICUBIC;
106 
107 typedef struct MyAVPacketList {
108  AVPacket pkt;
109  struct MyAVPacketList *next;
110  int serial;
111 } MyAVPacketList;
112 
113 typedef struct PacketQueue {
114  MyAVPacketList *first_pkt, *last_pkt;
115  int nb_packets;
116  int size;
117  int abort_request;
118  int serial;
119  SDL_mutex *mutex;
120  SDL_cond *cond;
121 } PacketQueue;
122 
123 #define VIDEO_PICTURE_QUEUE_SIZE 3
124 #define SUBPICTURE_QUEUE_SIZE 16
125 #define SAMPLE_QUEUE_SIZE 9
126 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
127 
128 typedef struct AudioParams {
129  int freq;
130  int channels;
131  int64_t channel_layout;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146 
147 /* Common struct for handling all types of decoded data and allocated render buffers. */
148 typedef struct Frame {
149  AVFrame *frame;
150  AVSubtitle sub;
151  int serial;
152  double pts; /* presentation timestamp for the frame */
153  double duration; /* estimated duration of the frame */
154  int64_t pos; /* byte position of the frame in the input file */
155  SDL_Overlay *bmp;
156  int allocated;
157  int reallocate;
158  int width;
159  int height;
160  AVRational sar;
161 } Frame;
162 
163 typedef struct FrameQueue {
164  Frame queue[FRAME_QUEUE_SIZE];
165  int rindex;
166  int windex;
167  int size;
168  int max_size;
169  int keep_last;
170  int rindex_shown;
171  SDL_mutex *mutex;
172  SDL_cond *cond;
173  PacketQueue *pktq;
174 } FrameQueue;
175 
176 enum {
177  AV_SYNC_AUDIO_MASTER, /* default choice */
178  AV_SYNC_VIDEO_MASTER, /* synchronize to video clock */
179  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
180 };
181 
182 typedef struct Decoder {
188  int finished;
190  SDL_cond *empty_queue_cond;
191  int64_t start_pts;
193  int64_t next_pts;
195  SDL_Thread *decoder_tid;
196 } Decoder;
197 
198 typedef struct VideoState {
199  SDL_Thread *read_tid;
203  int paused;
206  int seek_req;
208  int64_t seek_pos;
209  int64_t seek_rel;
212  int realtime;
213 
217 
221 
225 
227 
229 
230  double audio_clock;
232  double audio_diff_cum; /* used for AV difference average computation */
242  unsigned int audio_buf_size; /* in bytes */
243  unsigned int audio_buf1_size;
244  int audio_buf_index; /* in bytes */
247 #if CONFIG_AVFILTER
248  struct AudioParams audio_filter_src;
249 #endif
254 
255  enum ShowMode {
257  } show_mode;
264  int xpos;
266 
270 
271  double frame_timer;
277  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
278 #if !CONFIG_AVFILTER
280 #endif
282  int eof;
283 
284  char filename[1024];
286  int step;
287 
288 #if CONFIG_AVFILTER
289  int vfilter_idx;
290  AVFilterContext *in_video_filter; // the first filter in the video chain
291  AVFilterContext *out_video_filter; // the last filter in the video chain
292  AVFilterContext *in_audio_filter; // the first filter in the audio chain
293  AVFilterContext *out_audio_filter; // the last filter in the audio chain
294  AVFilterGraph *agraph; // audio filter graph
295 #endif
296 
298 
300 } VideoState;
301 
302 /* options specified by the user */
304 static const char *input_filename;
305 static const char *window_title;
306 static int fs_screen_width;
307 static int fs_screen_height;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int audio_disable;
313 static int video_disable;
314 static int subtitle_disable;
315 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
316 static int seek_by_bytes = -1;
317 static int display_disable;
318 static int show_status = 1;
320 static int64_t start_time = AV_NOPTS_VALUE;
321 static int64_t duration = AV_NOPTS_VALUE;
322 static int fast = 0;
323 static int genpts = 0;
324 static int lowres = 0;
325 static int decoder_reorder_pts = -1;
326 static int autoexit;
327 static int exit_on_keydown;
328 static int exit_on_mousedown;
329 static int loop = 1;
330 static int framedrop = -1;
331 static int infinite_buffer = -1;
332 static enum ShowMode show_mode = SHOW_MODE_NONE;
333 static const char *audio_codec_name;
334 static const char *subtitle_codec_name;
335 static const char *video_codec_name;
336 double rdftspeed = 0.02;
337 static int64_t cursor_last_shown;
338 static int cursor_hidden = 0;
339 #if CONFIG_AVFILTER
340 static const char **vfilters_list = NULL;
341 static int nb_vfilters = 0;
342 static char *afilters = NULL;
343 #endif
344 static int autorotate = 1;
345 
346 /* current context */
347 static int is_full_screen;
348 static int64_t audio_callback_time;
349 
350 static AVPacket flush_pkt;
351 
352 #define FF_ALLOC_EVENT (SDL_USEREVENT)
353 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
354 
355 static SDL_Surface *screen;
356 
357 #if CONFIG_AVFILTER
358 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
359 {
360  GROW_ARRAY(vfilters_list, nb_vfilters);
361  vfilters_list[nb_vfilters - 1] = arg;
362  return 0;
363 }
364 #endif
365 
366 static inline
367 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
368  enum AVSampleFormat fmt2, int64_t channel_count2)
369 {
370  /* If channel count == 1, planar and non-planar formats are the same */
371  if (channel_count1 == 1 && channel_count2 == 1)
372  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
373  else
374  return channel_count1 != channel_count2 || fmt1 != fmt2;
375 }
376 
377 static inline
378 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
379 {
380  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
381  return channel_layout;
382  else
383  return 0;
384 }
385 
386 static void free_picture(Frame *vp);
387 
388 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
389 {
390  MyAVPacketList *pkt1;
391 
392  if (q->abort_request)
393  return -1;
394 
395  pkt1 = av_malloc(sizeof(MyAVPacketList));
396  if (!pkt1)
397  return -1;
398  pkt1->pkt = *pkt;
399  pkt1->next = NULL;
400  if (pkt == &flush_pkt)
401  q->serial++;
402  pkt1->serial = q->serial;
403 
404  if (!q->last_pkt)
405  q->first_pkt = pkt1;
406  else
407  q->last_pkt->next = pkt1;
408  q->last_pkt = pkt1;
409  q->nb_packets++;
410  q->size += pkt1->pkt.size + sizeof(*pkt1);
411  /* XXX: should duplicate packet data in DV case */
412  SDL_CondSignal(q->cond);
413  return 0;
414 }
415 
416 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
417 {
418  int ret;
419 
420  /* duplicate the packet */
421  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
422  return -1;
423 
424  SDL_LockMutex(q->mutex);
425  ret = packet_queue_put_private(q, pkt);
426  SDL_UnlockMutex(q->mutex);
427 
428  if (pkt != &flush_pkt && ret < 0)
429  av_free_packet(pkt);
430 
431  return ret;
432 }
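The serial field is how ffplay invalidates stale data after a seek: pushing the special flush_pkt bumps q->serial, and every queued packet remembers the serial that was current when it was added, so consumers can recognise and drop packets that belong to the old playback position. A minimal sketch of the producer side (the helper name is illustrative; the real logic lives in the read thread, outside this excerpt):

    /* illustrative helper, not part of ffplay.c */
    static void queue_flush_after_seek(PacketQueue *q)
    {
        packet_queue_flush(q);           /* drop every packet queued before the seek */
        packet_queue_put(q, &flush_pkt); /* bumps q->serial; consumers compare their
                                            pkt_serial against q->serial and discard
                                            anything older */
    }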
433 
434 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
435 {
436  AVPacket pkt1, *pkt = &pkt1;
437  av_init_packet(pkt);
438  pkt->data = NULL;
439  pkt->size = 0;
440  pkt->stream_index = stream_index;
441  return packet_queue_put(q, pkt);
442 }
443 
444 /* packet queue handling */
445 static void packet_queue_init(PacketQueue *q)
446 {
447  memset(q, 0, sizeof(PacketQueue));
448  q->mutex = SDL_CreateMutex();
449  q->cond = SDL_CreateCond();
450  q->abort_request = 1;
451 }
452 
453 static void packet_queue_flush(PacketQueue *q)
454 {
455  MyAVPacketList *pkt, *pkt1;
456 
457  SDL_LockMutex(q->mutex);
458  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
459  pkt1 = pkt->next;
460  av_free_packet(&pkt->pkt);
461  av_freep(&pkt);
462  }
463  q->last_pkt = NULL;
464  q->first_pkt = NULL;
465  q->nb_packets = 0;
466  q->size = 0;
467  SDL_UnlockMutex(q->mutex);
468 }
469 
470 static void packet_queue_destroy(PacketQueue *q)
471 {
472  packet_queue_flush(q);
473  SDL_DestroyMutex(q->mutex);
474  SDL_DestroyCond(q->cond);
475 }
476 
477 static void packet_queue_abort(PacketQueue *q)
478 {
479  SDL_LockMutex(q->mutex);
480 
481  q->abort_request = 1;
482 
483  SDL_CondSignal(q->cond);
484 
485  SDL_UnlockMutex(q->mutex);
486 }
487 
488 static void packet_queue_start(PacketQueue *q)
489 {
490  SDL_LockMutex(q->mutex);
491  q->abort_request = 0;
492  packet_queue_put_private(q, &flush_pkt);
493  SDL_UnlockMutex(q->mutex);
494 }
495 
496 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
497 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
498 {
499  MyAVPacketList *pkt1;
500  int ret;
501 
502  SDL_LockMutex(q->mutex);
503 
504  for (;;) {
505  if (q->abort_request) {
506  ret = -1;
507  break;
508  }
509 
510  pkt1 = q->first_pkt;
511  if (pkt1) {
512  q->first_pkt = pkt1->next;
513  if (!q->first_pkt)
514  q->last_pkt = NULL;
515  q->nb_packets--;
516  q->size -= pkt1->pkt.size + sizeof(*pkt1);
517  *pkt = pkt1->pkt;
518  if (serial)
519  *serial = pkt1->serial;
520  av_free(pkt1);
521  ret = 1;
522  break;
523  } else if (!block) {
524  ret = 0;
525  break;
526  } else {
527  SDL_CondWait(q->cond, q->mutex);
528  }
529  }
530  SDL_UnlockMutex(q->mutex);
531  return ret;
532 }
533 
534 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
535  memset(d, 0, sizeof(Decoder));
536  d->avctx = avctx;
537  d->queue = queue;
538  d->empty_queue_cond = empty_queue_cond;
540 }
541 
543  int got_frame = 0;
544 
545  do {
546  int ret = -1;
547 
548  if (d->queue->abort_request)
549  return -1;
550 
551  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
552  AVPacket pkt;
553  do {
554  if (d->queue->nb_packets == 0)
555  SDL_CondSignal(d->empty_queue_cond);
556  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
557  return -1;
558  if (pkt.data == flush_pkt.data) {
559  avcodec_flush_buffers(d->avctx);
560  d->finished = 0;
561  d->next_pts = d->start_pts;
562  d->next_pts_tb = d->start_pts_tb;
563  }
564  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
565  av_free_packet(&d->pkt);
566  d->pkt_temp = d->pkt = pkt;
567  d->packet_pending = 1;
568  }
569 
570  switch (d->avctx->codec_type) {
571  case AVMEDIA_TYPE_VIDEO:
572  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
573  if (got_frame) {
574  if (decoder_reorder_pts == -1) {
575  frame->pts = av_frame_get_best_effort_timestamp(frame);
576  } else if (decoder_reorder_pts) {
577  frame->pts = frame->pkt_pts;
578  } else {
579  frame->pts = frame->pkt_dts;
580  }
581  }
582  break;
583  case AVMEDIA_TYPE_AUDIO:
584  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
585  if (got_frame) {
586  AVRational tb = (AVRational){1, frame->sample_rate};
587  if (frame->pts != AV_NOPTS_VALUE)
588  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
589  else if (frame->pkt_pts != AV_NOPTS_VALUE)
590  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
591  else if (d->next_pts != AV_NOPTS_VALUE)
592  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
593  if (frame->pts != AV_NOPTS_VALUE) {
594  d->next_pts = frame->pts + frame->nb_samples;
595  d->next_pts_tb = tb;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_SUBTITLE:
600  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
601  break;
602  }
603 
604  if (ret < 0) {
605  d->packet_pending = 0;
606  } else {
607  d->pkt_temp.dts =
608  d->pkt_temp.pts = AV_NOPTS_VALUE;
609  if (d->pkt_temp.data) {
611  ret = d->pkt_temp.size;
612  d->pkt_temp.data += ret;
613  d->pkt_temp.size -= ret;
614  if (d->pkt_temp.size <= 0)
615  d->packet_pending = 0;
616  } else {
617  if (!got_frame) {
618  d->packet_pending = 0;
619  d->finished = d->pkt_serial;
620  }
621  }
622  }
623  } while (!got_frame && !d->finished);
624 
625  return got_frame;
626 }
627 
628 static void decoder_destroy(Decoder *d) {
629  av_free_packet(&d->pkt);
630 }
631 
632 static void frame_queue_unref_item(Frame *vp)
633 {
634  av_frame_unref(vp->frame);
635  avsubtitle_free(&vp->sub);
636 }
637 
638 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
639 {
640  int i;
641  memset(f, 0, sizeof(FrameQueue));
642  if (!(f->mutex = SDL_CreateMutex()))
643  return AVERROR(ENOMEM);
644  if (!(f->cond = SDL_CreateCond()))
645  return AVERROR(ENOMEM);
646  f->pktq = pktq;
647  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
648  f->keep_last = !!keep_last;
649  for (i = 0; i < f->max_size; i++)
650  if (!(f->queue[i].frame = av_frame_alloc()))
651  return AVERROR(ENOMEM);
652  return 0;
653 }
654 
655 static void frame_queue_destory(FrameQueue *f)
656 {
657  int i;
658  for (i = 0; i < f->max_size; i++) {
659  Frame *vp = &f->queue[i];
660  frame_queue_unref_item(vp);
661  av_frame_free(&vp->frame);
662  free_picture(vp);
663  }
664  SDL_DestroyMutex(f->mutex);
665  SDL_DestroyCond(f->cond);
666 }
667 
668 static void frame_queue_signal(FrameQueue *f)
669 {
670  SDL_LockMutex(f->mutex);
671  SDL_CondSignal(f->cond);
672  SDL_UnlockMutex(f->mutex);
673 }
674 
675 static Frame *frame_queue_peek(FrameQueue *f)
676 {
677  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
678 }
679 
680 static Frame *frame_queue_peek_next(FrameQueue *f)
681 {
682  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
683 }
684 
685 static Frame *frame_queue_peek_last(FrameQueue *f)
686 {
687  return &f->queue[f->rindex];
688 }
689 
690 static Frame *frame_queue_peek_writable(FrameQueue *f)
691 {
692  /* wait until we have space to put a new frame */
693  SDL_LockMutex(f->mutex);
694  while (f->size >= f->max_size &&
695  !f->pktq->abort_request) {
696  SDL_CondWait(f->cond, f->mutex);
697  }
698  SDL_UnlockMutex(f->mutex);
699 
700  if (f->pktq->abort_request)
701  return NULL;
702 
703  return &f->queue[f->windex];
704 }
705 
706 static Frame *frame_queue_peek_readable(FrameQueue *f)
707 {
708  /* wait until we have a readable new frame */
709  SDL_LockMutex(f->mutex);
710  while (f->size - f->rindex_shown <= 0 &&
711  !f->pktq->abort_request) {
712  SDL_CondWait(f->cond, f->mutex);
713  }
714  SDL_UnlockMutex(f->mutex);
715 
716  if (f->pktq->abort_request)
717  return NULL;
718 
719  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
720 }
721 
722 static void frame_queue_push(FrameQueue *f)
723 {
724  if (++f->windex == f->max_size)
725  f->windex = 0;
726  SDL_LockMutex(f->mutex);
727  f->size++;
728  SDL_CondSignal(f->cond);
729  SDL_UnlockMutex(f->mutex);
730 }
731 
732 static void frame_queue_next(FrameQueue *f)
733 {
734  if (f->keep_last && !f->rindex_shown) {
735  f->rindex_shown = 1;
736  return;
737  }
739  if (++f->rindex == f->max_size)
740  f->rindex = 0;
741  SDL_LockMutex(f->mutex);
742  f->size--;
743  SDL_CondSignal(f->cond);
744  SDL_UnlockMutex(f->mutex);
745 }
746 
747 /* jump back to the previous frame if available by resetting rindex_shown */
748 static int frame_queue_prev(FrameQueue *f)
749 {
750  int ret = f->rindex_shown;
751  f->rindex_shown = 0;
752  return ret;
753 }
754 
755 /* return the number of undisplayed frames in the queue */
756 static int frame_queue_nb_remaining(FrameQueue *f)
757 {
758  return f->size - f->rindex_shown;
759 }
760 
761 /* return last shown position */
762 static int64_t frame_queue_last_pos(FrameQueue *f)
763 {
764  Frame *fp = &f->queue[f->rindex];
765  if (f->rindex_shown && fp->serial == f->pktq->serial)
766  return fp->pos;
767  else
768  return -1;
769 }
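FrameQueue is a fixed-size ring buffer guarded by a mutex/condition pair; keep_last and rindex_shown let the display keep the most recently shown frame peekable (for repaints and pause) while the decoder writes ahead. A minimal usage sketch (illustrative functions, not part of ffplay.c), assuming fq was set up with frame_queue_init() and is fed from a started PacketQueue:

    /* writer side (decoder thread) */
    static int sketch_writer(FrameQueue *fq, AVFrame *decoded, double pts, double duration)
    {
        Frame *vp = frame_queue_peek_writable(fq);  /* blocks while the ring is full */
        if (!vp)
            return -1;                              /* queue was aborted */
        av_frame_move_ref(vp->frame, decoded);      /* hand the decoded frame over */
        vp->pts = pts;
        vp->duration = duration;
        frame_queue_push(fq);                       /* size++, wakes the reader */
        return 0;
    }

    /* reader side (display thread) */
    static void sketch_reader(FrameQueue *fq)
    {
        Frame *vp = frame_queue_peek(fq);           /* oldest frame not yet shown */
        /* ... display vp ... */
        frame_queue_next(fq);                       /* with keep_last set, the first call
                                                       only marks rindex_shown, so vp stays
                                                       peekable for redisplay while paused */
    }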
770 
771 static void decoder_abort(Decoder *d, FrameQueue *fq)
772 {
774  frame_queue_signal(fq);
775  SDL_WaitThread(d->decoder_tid, NULL);
776  d->decoder_tid = NULL;
778 }
779 
780 static inline void fill_rectangle(SDL_Surface *screen,
781  int x, int y, int w, int h, int color, int update)
782 {
783  SDL_Rect rect;
784  rect.x = x;
785  rect.y = y;
786  rect.w = w;
787  rect.h = h;
788  SDL_FillRect(screen, &rect, color);
789  if (update && w > 0 && h > 0)
790  SDL_UpdateRect(screen, x, y, w, h);
791 }
792 
793 /* draw only the border of a rectangle */
794 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
795 {
796  int w1, w2, h1, h2;
797 
798  /* fill the background */
799  w1 = x;
800  if (w1 < 0)
801  w1 = 0;
802  w2 = width - (x + w);
803  if (w2 < 0)
804  w2 = 0;
805  h1 = y;
806  if (h1 < 0)
807  h1 = 0;
808  h2 = height - (y + h);
809  if (h2 < 0)
810  h2 = 0;
812  xleft, ytop,
813  w1, height,
814  color, update);
816  xleft + width - w2, ytop,
817  w2, height,
818  color, update);
820  xleft + w1, ytop,
821  width - w1 - w2, h1,
822  color, update);
824  xleft + w1, ytop + height - h2,
825  width - w1 - w2, h2,
826  color, update);
827 }
828 
829 #define ALPHA_BLEND(a, oldp, newp, s)\
830 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
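ALPHA_BLEND is an integer alpha blend: with s = 0 it is the usual (oldp*(255-a) + newp*a)/255, and the shift lets call sites pass a newp that is the sum of 2^s chroma samples (the u1/v1 accumulators below) while the single old pixel is scaled up to match. For example, ALPHA_BLEND(128, 100, 200, 0) = (100*127 + 200*128)/255 = 38300/255 = 150.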
831 
832 #define RGBA_IN(r, g, b, a, s)\
833 {\
834  unsigned int v = ((const uint32_t *)(s))[0];\
835  a = (v >> 24) & 0xff;\
836  r = (v >> 16) & 0xff;\
837  g = (v >> 8) & 0xff;\
838  b = v & 0xff;\
839 }
840 
841 #define YUVA_IN(y, u, v, a, s, pal)\
842 {\
843  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
844  a = (val >> 24) & 0xff;\
845  y = (val >> 16) & 0xff;\
846  u = (val >> 8) & 0xff;\
847  v = val & 0xff;\
848 }
849 
850 #define YUVA_OUT(d, y, u, v, a)\
851 {\
852  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
853 }
854 
855 
856 #define BPP 1
857 
858 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
859 {
860  int wrap, wrap3, width2, skip2;
861  int y, u, v, a, u1, v1, a1, w, h;
862  uint8_t *lum, *cb, *cr;
863  const uint8_t *p;
864  const uint32_t *pal;
865  int dstx, dsty, dstw, dsth;
866 
867  dstw = av_clip(rect->w, 0, imgw);
868  dsth = av_clip(rect->h, 0, imgh);
869  dstx = av_clip(rect->x, 0, imgw - dstw);
870  dsty = av_clip(rect->y, 0, imgh - dsth);
871  lum = dst->data[0] + dsty * dst->linesize[0];
872  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
873  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
874 
875  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
876  skip2 = dstx >> 1;
877  wrap = dst->linesize[0];
878  wrap3 = rect->pict.linesize[0];
879  p = rect->pict.data[0];
880  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
881 
882  if (dsty & 1) {
883  lum += dstx;
884  cb += skip2;
885  cr += skip2;
886 
887  if (dstx & 1) {
888  YUVA_IN(y, u, v, a, p, pal);
889  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
890  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
891  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
892  cb++;
893  cr++;
894  lum++;
895  p += BPP;
896  }
897  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
898  YUVA_IN(y, u, v, a, p, pal);
899  u1 = u;
900  v1 = v;
901  a1 = a;
902  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
903 
904  YUVA_IN(y, u, v, a, p + BPP, pal);
905  u1 += u;
906  v1 += v;
907  a1 += a;
908  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
909  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
910  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
911  cb++;
912  cr++;
913  p += 2 * BPP;
914  lum += 2;
915  }
916  if (w) {
917  YUVA_IN(y, u, v, a, p, pal);
918  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
919  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
920  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
921  p++;
922  lum++;
923  }
924  p += wrap3 - dstw * BPP;
925  lum += wrap - dstw - dstx;
926  cb += dst->linesize[1] - width2 - skip2;
927  cr += dst->linesize[2] - width2 - skip2;
928  }
929  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
930  lum += dstx;
931  cb += skip2;
932  cr += skip2;
933 
934  if (dstx & 1) {
935  YUVA_IN(y, u, v, a, p, pal);
936  u1 = u;
937  v1 = v;
938  a1 = a;
939  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
940  p += wrap3;
941  lum += wrap;
942  YUVA_IN(y, u, v, a, p, pal);
943  u1 += u;
944  v1 += v;
945  a1 += a;
946  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
947  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
948  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
949  cb++;
950  cr++;
951  p += -wrap3 + BPP;
952  lum += -wrap + 1;
953  }
954  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
955  YUVA_IN(y, u, v, a, p, pal);
956  u1 = u;
957  v1 = v;
958  a1 = a;
959  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
960 
961  YUVA_IN(y, u, v, a, p + BPP, pal);
962  u1 += u;
963  v1 += v;
964  a1 += a;
965  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
966  p += wrap3;
967  lum += wrap;
968 
969  YUVA_IN(y, u, v, a, p, pal);
970  u1 += u;
971  v1 += v;
972  a1 += a;
973  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
974 
975  YUVA_IN(y, u, v, a, p + BPP, pal);
976  u1 += u;
977  v1 += v;
978  a1 += a;
979  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
980 
981  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
982  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
983 
984  cb++;
985  cr++;
986  p += -wrap3 + 2 * BPP;
987  lum += -wrap + 2;
988  }
989  if (w) {
990  YUVA_IN(y, u, v, a, p, pal);
991  u1 = u;
992  v1 = v;
993  a1 = a;
994  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
995  p += wrap3;
996  lum += wrap;
997  YUVA_IN(y, u, v, a, p, pal);
998  u1 += u;
999  v1 += v;
1000  a1 += a;
1001  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1002  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
1003  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
1004  cb++;
1005  cr++;
1006  p += -wrap3 + BPP;
1007  lum += -wrap + 1;
1008  }
1009  p += wrap3 + (wrap3 - dstw * BPP);
1010  lum += wrap + (wrap - dstw - dstx);
1011  cb += dst->linesize[1] - width2 - skip2;
1012  cr += dst->linesize[2] - width2 - skip2;
1013  }
1014  /* handle odd height */
1015  if (h) {
1016  lum += dstx;
1017  cb += skip2;
1018  cr += skip2;
1019 
1020  if (dstx & 1) {
1021  YUVA_IN(y, u, v, a, p, pal);
1022  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1023  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1024  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1025  cb++;
1026  cr++;
1027  lum++;
1028  p += BPP;
1029  }
1030  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
1031  YUVA_IN(y, u, v, a, p, pal);
1032  u1 = u;
1033  v1 = v;
1034  a1 = a;
1035  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1036 
1037  YUVA_IN(y, u, v, a, p + BPP, pal);
1038  u1 += u;
1039  v1 += v;
1040  a1 += a;
1041  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
1042  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
1043  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
1044  cb++;
1045  cr++;
1046  p += 2 * BPP;
1047  lum += 2;
1048  }
1049  if (w) {
1050  YUVA_IN(y, u, v, a, p, pal);
1051  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1052  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1053  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1054  }
1055  }
1056 }
1057 
1058 static void free_picture(Frame *vp)
1059 {
1060  if (vp->bmp) {
1061  SDL_FreeYUVOverlay(vp->bmp);
1062  vp->bmp = NULL;
1063  }
1064 }
1065 
1066 static void calculate_display_rect(SDL_Rect *rect,
1067  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
1068  int pic_width, int pic_height, AVRational pic_sar)
1069 {
1070  float aspect_ratio;
1071  int width, height, x, y;
1072 
1073  if (pic_sar.num == 0)
1074  aspect_ratio = 0;
1075  else
1076  aspect_ratio = av_q2d(pic_sar);
1077 
1078  if (aspect_ratio <= 0.0)
1079  aspect_ratio = 1.0;
1080  aspect_ratio *= (float)pic_width / (float)pic_height;
1081 
1082  /* XXX: we suppose the screen has a 1.0 pixel ratio */
1083  height = scr_height;
1084  width = ((int)rint(height * aspect_ratio)) & ~1;
1085  if (width > scr_width) {
1086  width = scr_width;
1087  height = ((int)rint(width / aspect_ratio)) & ~1;
1088  }
1089  x = (scr_width - width) / 2;
1090  y = (scr_height - height) / 2;
1091  rect->x = scr_xleft + x;
1092  rect->y = scr_ytop + y;
1093  rect->w = FFMAX(width, 1);
1094  rect->h = FFMAX(height, 1);
1095 }
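Worked example: a 1920x1080 frame with a 1:1 sample aspect ratio shown in a 640x480 area gives aspect_ratio = 1920/1080 ≈ 1.778; the first guess of 480 * 1.778 ≈ 853 pixels is wider than the screen, so the rectangle is clamped to 640x360 and centred at y = (480 - 360) / 2 = 60, i.e. the video is letterboxed.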
1096 
1097 static void video_image_display(VideoState *is)
1098 {
1099  Frame *vp;
1100  Frame *sp;
1101  AVPicture pict;
1102  SDL_Rect rect;
1103  int i;
1104 
1105  vp = frame_queue_peek(&is->pictq);
1106  if (vp->bmp) {
1107  if (is->subtitle_st) {
1108  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1109  sp = frame_queue_peek(&is->subpq);
1110 
1111  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1112  SDL_LockYUVOverlay (vp->bmp);
1113 
1114  pict.data[0] = vp->bmp->pixels[0];
1115  pict.data[1] = vp->bmp->pixels[2];
1116  pict.data[2] = vp->bmp->pixels[1];
1117 
1118  pict.linesize[0] = vp->bmp->pitches[0];
1119  pict.linesize[1] = vp->bmp->pitches[2];
1120  pict.linesize[2] = vp->bmp->pitches[1];
1121 
1122  for (i = 0; i < sp->sub.num_rects; i++)
1123  blend_subrect(&pict, sp->sub.rects[i],
1124  vp->bmp->w, vp->bmp->h);
1125 
1126  SDL_UnlockYUVOverlay (vp->bmp);
1127  }
1128  }
1129  }
1130 
1131  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1132 
1133  SDL_DisplayYUVOverlay(vp->bmp, &rect);
1134 
1135  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1136  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1137  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1138  is->last_display_rect = rect;
1139  }
1140  }
1141 }
1142 
1143 static inline int compute_mod(int a, int b)
1144 {
1145  return a < 0 ? a%b + b : a%b;
1146 }
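Unlike the C % operator, which truncates toward zero (-3 % 10 == -3), compute_mod(-3, 10) returns 7, so indices that step backwards through sample_array in video_audio_display() always stay within [0, SAMPLE_ARRAY_SIZE).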
1147 
1148 static void video_audio_display(VideoState *s)
1149 {
1150  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1151  int ch, channels, h, h2, bgcolor, fgcolor;
1152  int64_t time_diff;
1153  int rdft_bits, nb_freq;
1154 
1155  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1156  ;
1157  nb_freq = 1 << (rdft_bits - 1);
1158 
1159  /* compute display index : center on currently output samples */
1160  channels = s->audio_tgt.channels;
1161  nb_display_channels = channels;
1162  if (!s->paused) {
1163  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1164  n = 2 * channels;
1165  delay = s->audio_write_buf_size;
1166  delay /= n;
1167 
1168  /* to be more precise, we take into account the time spent since
1169  the last buffer computation */
1170  if (audio_callback_time) {
1171  time_diff = av_gettime_relative() - audio_callback_time;
1172  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1173  }
1174 
1175  delay += 2 * data_used;
1176  if (delay < data_used)
1177  delay = data_used;
1178 
1179  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1180  if (s->show_mode == SHOW_MODE_WAVES) {
1181  h = INT_MIN;
1182  for (i = 0; i < 1000; i += channels) {
1183  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1184  int a = s->sample_array[idx];
1185  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1186  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1187  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1188  int score = a - d;
1189  if (h < score && (b ^ c) < 0) {
1190  h = score;
1191  i_start = idx;
1192  }
1193  }
1194  }
1195 
1196  s->last_i_start = i_start;
1197  } else {
1198  i_start = s->last_i_start;
1199  }
1200 
1201  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1202  if (s->show_mode == SHOW_MODE_WAVES) {
1204  s->xleft, s->ytop, s->width, s->height,
1205  bgcolor, 0);
1206 
1207  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1208 
1209  /* total height for one channel */
1210  h = s->height / nb_display_channels;
1211  /* graph height / 2 */
1212  h2 = (h * 9) / 20;
1213  for (ch = 0; ch < nb_display_channels; ch++) {
1214  i = i_start + ch;
1215  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1216  for (x = 0; x < s->width; x++) {
1217  y = (s->sample_array[i] * h2) >> 15;
1218  if (y < 0) {
1219  y = -y;
1220  ys = y1 - y;
1221  } else {
1222  ys = y1;
1223  }
1225  s->xleft + x, ys, 1, y,
1226  fgcolor, 0);
1227  i += channels;
1228  if (i >= SAMPLE_ARRAY_SIZE)
1229  i -= SAMPLE_ARRAY_SIZE;
1230  }
1231  }
1232 
1233  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1234 
1235  for (ch = 1; ch < nb_display_channels; ch++) {
1236  y = s->ytop + ch * h;
1238  s->xleft, y, s->width, 1,
1239  fgcolor, 0);
1240  }
1241  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1242  } else {
1243  nb_display_channels= FFMIN(nb_display_channels, 2);
1244  if (rdft_bits != s->rdft_bits) {
1245  av_rdft_end(s->rdft);
1246  av_free(s->rdft_data);
1247  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1248  s->rdft_bits = rdft_bits;
1249  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1250  }
1251  if (!s->rdft || !s->rdft_data){
1252  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1253  s->show_mode = SHOW_MODE_WAVES;
1254  } else {
1255  FFTSample *data[2];
1256  for (ch = 0; ch < nb_display_channels; ch++) {
1257  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1258  i = i_start + ch;
1259  for (x = 0; x < 2 * nb_freq; x++) {
1260  double w = (x-nb_freq) * (1.0 / nb_freq);
1261  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1262  i += channels;
1263  if (i >= SAMPLE_ARRAY_SIZE)
1264  i -= SAMPLE_ARRAY_SIZE;
1265  }
1266  av_rdft_calc(s->rdft, data[ch]);
1267  }
1268  /* This is the least efficient way to do this; we should of course
1269  * access the data directly, but it is more than fast enough. */
1270  for (y = 0; y < s->height; y++) {
1271  double w = 1 / sqrt(nb_freq);
1272  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1273  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1274  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1275  a = FFMIN(a, 255);
1276  b = FFMIN(b, 255);
1277  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1278 
1280  s->xpos, s->height-y, 1, 1,
1281  fgcolor, 0);
1282  }
1283  }
1284  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1285  if (!s->paused)
1286  s->xpos++;
1287  if (s->xpos >= s->width)
1288  s->xpos= s->xleft;
1289  }
1290 }
1291 
1292 static void stream_close(VideoState *is)
1293 {
1294  /* XXX: use a special url_shutdown call to abort parse cleanly */
1295  is->abort_request = 1;
1296  SDL_WaitThread(is->read_tid, NULL);
1300 
1301  /* free all pictures */
1302  frame_queue_destory(&is->pictq);
1303  frame_queue_destory(&is->sampq);
1304  frame_queue_destory(&is->subpq);
1305  SDL_DestroyCond(is->continue_read_thread);
1306 #if !CONFIG_AVFILTER
1308 #endif
1309  av_free(is);
1310 }
1311 
1312 static void do_exit(VideoState *is)
1313 {
1314  if (is) {
1315  stream_close(is);
1316  }
1318  uninit_opts();
1319 #if CONFIG_AVFILTER
1320  av_freep(&vfilters_list);
1321 #endif
1323  if (show_status)
1324  printf("\n");
1325  SDL_Quit();
1326  av_log(NULL, AV_LOG_QUIET, "%s", "");
1327  exit(0);
1328 }
1329 
1330 static void sigterm_handler(int sig)
1331 {
1332  exit(123);
1333 }
1334 
1335 static void set_default_window_size(int width, int height, AVRational sar)
1336 {
1337  SDL_Rect rect;
1338  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1339  default_width = rect.w;
1340  default_height = rect.h;
1341 }
1342 
1343 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1344 {
1345  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1346  int w,h;
1347 
1348  if (is_full_screen) flags |= SDL_FULLSCREEN;
1349  else flags |= SDL_RESIZABLE;
1350 
1351  if (vp && vp->width)
1352  set_default_window_size(vp->width, vp->height, vp->sar);
1353 
1355  w = fs_screen_width;
1356  h = fs_screen_height;
1357  } else if (!is_full_screen && screen_width) {
1358  w = screen_width;
1359  h = screen_height;
1360  } else {
1361  w = default_width;
1362  h = default_height;
1363  }
1364  w = FFMIN(16383, w);
1365  if (screen && is->width == screen->w && screen->w == w
1366  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1367  return 0;
1368  screen = SDL_SetVideoMode(w, h, 0, flags);
1369  if (!screen) {
1370  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1371  do_exit(is);
1372  }
1373  if (!window_title)
1375  SDL_WM_SetCaption(window_title, window_title);
1376 
1377  is->width = screen->w;
1378  is->height = screen->h;
1379 
1380  return 0;
1381 }
1382 
1383 /* display the current picture, if any */
1384 static void video_display(VideoState *is)
1385 {
1386  if (!screen)
1387  video_open(is, 0, NULL);
1388  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1389  video_audio_display(is);
1390  else if (is->video_st)
1391  video_image_display(is);
1392 }
1393 
1394 static double get_clock(Clock *c)
1395 {
1396  if (*c->queue_serial != c->serial)
1397  return NAN;
1398  if (c->paused) {
1399  return c->pts;
1400  } else {
1401  double time = av_gettime_relative() / 1000000.0;
1402  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1403  }
1404 }
1405 
1406 static void set_clock_at(Clock *c, double pts, int serial, double time)
1407 {
1408  c->pts = pts;
1409  c->last_updated = time;
1410  c->pts_drift = c->pts - time;
1411  c->serial = serial;
1412 }
1413 
1414 static void set_clock(Clock *c, double pts, int serial)
1415 {
1416  double time = av_gettime_relative() / 1000000.0;
1417  set_clock_at(c, pts, serial, time);
1418 }
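Worked example of the drift bookkeeping: set_clock(c, 10.0, serial) at wall-clock time 100.0 stores pts_drift = 10.0 - 100.0 = -90.0. Two seconds later get_clock() computes -90.0 + 102.0 - (102.0 - 100.0) * (1.0 - speed); with speed 1.0 that is 12.0, and with speed 0.5 it is 11.0, i.e. only one second of media time has passed.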
1419 
1420 static void set_clock_speed(Clock *c, double speed)
1421 {
1422  set_clock(c, get_clock(c), c->serial);
1423  c->speed = speed;
1424 }
1425 
1426 static void init_clock(Clock *c, int *queue_serial)
1427 {
1428  c->speed = 1.0;
1429  c->paused = 0;
1430  c->queue_serial = queue_serial;
1431  set_clock(c, NAN, -1);
1432 }
1433 
1434 static void sync_clock_to_slave(Clock *c, Clock *slave)
1435 {
1436  double clock = get_clock(c);
1437  double slave_clock = get_clock(slave);
1438  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1439  set_clock(c, slave_clock, slave->serial);
1440 }
1441 
1442 static int get_master_sync_type(VideoState *is) {
1443  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1444  if (is->video_st)
1445  return AV_SYNC_VIDEO_MASTER;
1446  else
1447  return AV_SYNC_AUDIO_MASTER;
1448  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1449  if (is->audio_st)
1450  return AV_SYNC_AUDIO_MASTER;
1451  else
1452  return AV_SYNC_EXTERNAL_CLOCK;
1453  } else {
1454  return AV_SYNC_EXTERNAL_CLOCK;
1455  }
1456 }
1457 
1458 /* get the current master clock value */
1459 static double get_master_clock(VideoState *is)
1460 {
1461  double val;
1462 
1463  switch (get_master_sync_type(is)) {
1464  case AV_SYNC_VIDEO_MASTER:
1465  val = get_clock(&is->vidclk);
1466  break;
1467  case AV_SYNC_AUDIO_MASTER:
1468  val = get_clock(&is->audclk);
1469  break;
1470  default:
1471  val = get_clock(&is->extclk);
1472  break;
1473  }
1474  return val;
1475 }
1476 
1478  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1479  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1481  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1482  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1484  } else {
1485  double speed = is->extclk.speed;
1486  if (speed != 1.0)
1487  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1488  }
1489 }
1490 
1491 /* seek in the stream */
1492 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1493 {
1494  if (!is->seek_req) {
1495  is->seek_pos = pos;
1496  is->seek_rel = rel;
1497  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1498  if (seek_by_bytes)
1500  is->seek_req = 1;
1501  SDL_CondSignal(is->continue_read_thread);
1502  }
1503 }
1504 
1505 /* pause or resume the video */
1506 static void stream_toggle_pause(VideoState *is)
1507 {
1508  if (is->paused) {
1509  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1510  if (is->read_pause_return != AVERROR(ENOSYS)) {
1511  is->vidclk.paused = 0;
1512  }
1513  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1514  }
1515  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1516  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1517 }
1518 
1519 static void toggle_pause(VideoState *is)
1520 {
1521  stream_toggle_pause(is);
1522  is->step = 0;
1523 }
1524 
1526 {
1527  /* if the stream is paused unpause it, then step */
1528  if (is->paused)
1529  stream_toggle_pause(is);
1530  is->step = 1;
1531 }
1532 
1533 static double compute_target_delay(double delay, VideoState *is)
1534 {
1535  double sync_threshold, diff = 0;
1536 
1537  /* update delay to follow master synchronisation source */
1539  /* if video is slave, we try to correct big delays by
1540  duplicating or deleting a frame */
1541  diff = get_clock(&is->vidclk) - get_master_clock(is);
1542 
1543  /* skip or repeat frame. We take into account the
1544  delay to compute the threshold. I still don't know
1545  if it is the best guess */
1546  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1547  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1548  if (diff <= -sync_threshold)
1549  delay = FFMAX(0, delay + diff);
1550  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1551  delay = delay + diff;
1552  else if (diff >= sync_threshold)
1553  delay = 2 * delay;
1554  }
1555  }
1556 
1557  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1558  delay, -diff);
1559 
1560  return delay;
1561 }
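Worked example: with a nominal frame delay of 0.040 s the sync threshold clamps to 0.040. If the video clock is 0.080 s behind the master (diff = -0.080), the delay collapses to FFMAX(0, 0.040 - 0.080) = 0 and the next frame is shown immediately; if it is 0.080 s ahead, the delay doubles to 0.080 s and the current frame is held longer (the diff is only added outright when the delay itself exceeds AV_SYNC_FRAMEDUP_THRESHOLD).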
1562 
1563 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1564  if (vp->serial == nextvp->serial) {
1565  double duration = nextvp->pts - vp->pts;
1566  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1567  return vp->duration;
1568  else
1569  return duration;
1570  } else {
1571  return 0.0;
1572  }
1573 }
1574 
1575 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1576  /* update current video pts */
1577  set_clock(&is->vidclk, pts, serial);
1578  sync_clock_to_slave(&is->extclk, &is->vidclk);
1579 }
1580 
1581 /* called to display each frame */
1582 static void video_refresh(void *opaque, double *remaining_time)
1583 {
1584  VideoState *is = opaque;
1585  double time;
1586 
1587  Frame *sp, *sp2;
1588 
1589  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1591 
1592  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1593  time = av_gettime_relative() / 1000000.0;
1594  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1595  video_display(is);
1596  is->last_vis_time = time;
1597  }
1598  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1599  }
1600 
1601  if (is->video_st) {
1602  int redisplay = 0;
1603  if (is->force_refresh)
1604  redisplay = frame_queue_prev(&is->pictq);
1605 retry:
1606  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1607  // nothing to do, no picture to display in the queue
1608  } else {
1609  double last_duration, duration, delay;
1610  Frame *vp, *lastvp;
1611 
1612  /* dequeue the picture */
1613  lastvp = frame_queue_peek_last(&is->pictq);
1614  vp = frame_queue_peek(&is->pictq);
1615 
1616  if (vp->serial != is->videoq.serial) {
1617  frame_queue_next(&is->pictq);
1618  redisplay = 0;
1619  goto retry;
1620  }
1621 
1622  if (lastvp->serial != vp->serial && !redisplay)
1623  is->frame_timer = av_gettime_relative() / 1000000.0;
1624 
1625  if (is->paused)
1626  goto display;
1627 
1628  /* compute nominal last_duration */
1629  last_duration = vp_duration(is, lastvp, vp);
1630  if (redisplay)
1631  delay = 0.0;
1632  else
1633  delay = compute_target_delay(last_duration, is);
1634 
1635  time= av_gettime_relative()/1000000.0;
1636  if (time < is->frame_timer + delay && !redisplay) {
1637  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1638  return;
1639  }
1640 
1641  is->frame_timer += delay;
1642  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1643  is->frame_timer = time;
1644 
1645  SDL_LockMutex(is->pictq.mutex);
1646  if (!redisplay && !isnan(vp->pts))
1647  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1648  SDL_UnlockMutex(is->pictq.mutex);
1649 
1650  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1651  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1652  duration = vp_duration(is, vp, nextvp);
1653  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1654  if (!redisplay)
1655  is->frame_drops_late++;
1656  frame_queue_next(&is->pictq);
1657  redisplay = 0;
1658  goto retry;
1659  }
1660  }
1661 
1662  if (is->subtitle_st) {
1663  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1664  sp = frame_queue_peek(&is->subpq);
1665 
1666  if (frame_queue_nb_remaining(&is->subpq) > 1)
1667  sp2 = frame_queue_peek_next(&is->subpq);
1668  else
1669  sp2 = NULL;
1670 
1671  if (sp->serial != is->subtitleq.serial
1672  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1673  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1674  {
1675  frame_queue_next(&is->subpq);
1676  } else {
1677  break;
1678  }
1679  }
1680  }
1681 
1682 display:
1683  /* display picture */
1684  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1685  video_display(is);
1686 
1687  frame_queue_next(&is->pictq);
1688 
1689  if (is->step && !is->paused)
1690  stream_toggle_pause(is);
1691  }
1692  }
1693  is->force_refresh = 0;
1694  if (show_status) {
1695  static int64_t last_time;
1696  int64_t cur_time;
1697  int aqsize, vqsize, sqsize;
1698  double av_diff;
1699 
1700  cur_time = av_gettime_relative();
1701  if (!last_time || (cur_time - last_time) >= 30000) {
1702  aqsize = 0;
1703  vqsize = 0;
1704  sqsize = 0;
1705  if (is->audio_st)
1706  aqsize = is->audioq.size;
1707  if (is->video_st)
1708  vqsize = is->videoq.size;
1709  if (is->subtitle_st)
1710  sqsize = is->subtitleq.size;
1711  av_diff = 0;
1712  if (is->audio_st && is->video_st)
1713  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1714  else if (is->video_st)
1715  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1716  else if (is->audio_st)
1717  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1719  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1720  get_master_clock(is),
1721  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1722  av_diff,
1724  aqsize / 1024,
1725  vqsize / 1024,
1726  sqsize,
1729  fflush(stdout);
1730  last_time = cur_time;
1731  }
1732  }
1733 }
1734 
1735 /* allocate a picture (this needs to be done in the main thread to avoid
1736  potential locking problems) */
1737 static void alloc_picture(VideoState *is)
1738 {
1739  Frame *vp;
1740  int64_t bufferdiff;
1741 
1742  vp = &is->pictq.queue[is->pictq.windex];
1743 
1744  free_picture(vp);
1745 
1746  video_open(is, 0, vp);
1747 
1748  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1749  SDL_YV12_OVERLAY,
1750  screen);
1751  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1752  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1753  /* SDL allocates a buffer smaller than requested if the video
1754  * overlay hardware is unable to support the requested size. */
1756  "Error: the video system does not support an image\n"
1757  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1758  "to reduce the image size.\n", vp->width, vp->height );
1759  do_exit(is);
1760  }
1761 
1762  SDL_LockMutex(is->pictq.mutex);
1763  vp->allocated = 1;
1764  SDL_CondSignal(is->pictq.cond);
1765  SDL_UnlockMutex(is->pictq.mutex);
1766 }
1767 
1768 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1769  int i, width, height;
1770  Uint8 *p, *maxp;
1771  for (i = 0; i < 3; i++) {
1772  width = bmp->w;
1773  height = bmp->h;
1774  if (i > 0) {
1775  width >>= 1;
1776  height >>= 1;
1777  }
1778  if (bmp->pitches[i] > width) {
1779  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1780  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1781  *(p+1) = *p;
1782  }
1783  }
1784 }
1785 
1786 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1787 {
1788  Frame *vp;
1789 
1790 #if defined(DEBUG_SYNC) && 0
1791  printf("frame_type=%c pts=%0.3f\n",
1792  av_get_picture_type_char(src_frame->pict_type), pts);
1793 #endif
1794 
1795  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1796  return -1;
1797 
1798  vp->sar = src_frame->sample_aspect_ratio;
1799 
1800  /* alloc or resize hardware picture buffer */
1801  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1802  vp->width != src_frame->width ||
1803  vp->height != src_frame->height) {
1804  SDL_Event event;
1805 
1806  vp->allocated = 0;
1807  vp->reallocate = 0;
1808  vp->width = src_frame->width;
1809  vp->height = src_frame->height;
1810 
1811  /* the allocation must be done in the main thread to avoid
1812  locking problems. */
1813  event.type = FF_ALLOC_EVENT;
1814  event.user.data1 = is;
1815  SDL_PushEvent(&event);
1816 
1817  /* wait until the picture is allocated */
1818  SDL_LockMutex(is->pictq.mutex);
1819  while (!vp->allocated && !is->videoq.abort_request) {
1820  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1821  }
1822  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1823  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1824  while (!vp->allocated && !is->abort_request) {
1825  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1826  }
1827  }
1828  SDL_UnlockMutex(is->pictq.mutex);
1829 
1830  if (is->videoq.abort_request)
1831  return -1;
1832  }
1833 
1834  /* if the frame is not skipped, then display it */
1835  if (vp->bmp) {
1836  AVPicture pict = { { 0 } };
1837 
1838  /* get a pointer on the bitmap */
1839  SDL_LockYUVOverlay (vp->bmp);
1840 
1841  pict.data[0] = vp->bmp->pixels[0];
1842  pict.data[1] = vp->bmp->pixels[2];
1843  pict.data[2] = vp->bmp->pixels[1];
1844 
1845  pict.linesize[0] = vp->bmp->pitches[0];
1846  pict.linesize[1] = vp->bmp->pitches[2];
1847  pict.linesize[2] = vp->bmp->pitches[1];
1848 
1849 #if CONFIG_AVFILTER
1850  // FIXME use direct rendering
1851  av_picture_copy(&pict, (AVPicture *)src_frame,
1852  src_frame->format, vp->width, vp->height);
1853 #else
1854  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1856  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1858  if (!is->img_convert_ctx) {
1859  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1860  exit(1);
1861  }
1862  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1863  0, vp->height, pict.data, pict.linesize);
1864 #endif
1865  /* workaround SDL PITCH_WORKAROUND */
1867  /* update the bitmap content */
1868  SDL_UnlockYUVOverlay(vp->bmp);
1869 
1870  vp->pts = pts;
1871  vp->duration = duration;
1872  vp->pos = pos;
1873  vp->serial = serial;
1874 
1875  /* now we can update the picture count */
1876  frame_queue_push(&is->pictq);
1877  }
1878  return 0;
1879 }
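The SDL overlay must be (re)allocated on the main thread, so queue_picture() posts FF_ALLOC_EVENT and then sleeps on pictq.cond until alloc_picture() has run. A minimal sketch of the main-thread side of that handshake (illustrative only; the real event loop is outside this excerpt):

    /* illustrative sketch, not the real event loop of ffplay.c */
    static void wait_for_alloc_events(void)
    {
        SDL_Event event;
        for (;;) {
            SDL_WaitEvent(&event);
            if (event.type == FF_ALLOC_EVENT) {
                /* runs on the main thread; alloc_picture() signals pictq.cond,
                   unblocking the video thread waiting in queue_picture() */
                alloc_picture(event.user.data1);
            }
            /* ... keyboard, mouse and FF_QUIT_EVENT handling omitted ... */
        }
    }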
1880 
1882 {
1883  int got_picture;
1884 
1885  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1886  return -1;
1887 
1888  if (got_picture) {
1889  double dpts = NAN;
1890 
1891  if (frame->pts != AV_NOPTS_VALUE)
1892  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1893 
1894  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1895 
1897  if (frame->pts != AV_NOPTS_VALUE) {
1898  double diff = dpts - get_master_clock(is);
1899  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1900  diff - is->frame_last_filter_delay < 0 &&
1901  is->viddec.pkt_serial == is->vidclk.serial &&
1902  is->videoq.nb_packets) {
1903  is->frame_drops_early++;
1904  av_frame_unref(frame);
1905  got_picture = 0;
1906  }
1907  }
1908  }
1909  }
1910 
1911  return got_picture;
1912 }
1913 
1914 #if CONFIG_AVFILTER
1915 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1916  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1917 {
1918  int ret, i;
1919  int nb_filters = graph->nb_filters;
1921 
1922  if (filtergraph) {
1923  outputs = avfilter_inout_alloc();
1924  inputs = avfilter_inout_alloc();
1925  if (!outputs || !inputs) {
1926  ret = AVERROR(ENOMEM);
1927  goto fail;
1928  }
1929 
1930  outputs->name = av_strdup("in");
1931  outputs->filter_ctx = source_ctx;
1932  outputs->pad_idx = 0;
1933  outputs->next = NULL;
1934 
1935  inputs->name = av_strdup("out");
1936  inputs->filter_ctx = sink_ctx;
1937  inputs->pad_idx = 0;
1938  inputs->next = NULL;
1939 
1940  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1941  goto fail;
1942  } else {
1943  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1944  goto fail;
1945  }
1946 
1947  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1948  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1949  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1950 
1951  ret = avfilter_graph_config(graph, NULL);
1952 fail:
1953  avfilter_inout_free(&outputs);
1954  avfilter_inout_free(&inputs);
1955  return ret;
1956 }
1957 
1958 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1959 {
1960  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1961  char sws_flags_str[128];
1962  char buffersrc_args[256];
1963  int ret;
1964  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1965  AVCodecContext *codec = is->video_st->codec;
1966  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1967 
1968  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1969  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1970  graph->scale_sws_opts = av_strdup(sws_flags_str);
1971 
1972  snprintf(buffersrc_args, sizeof(buffersrc_args),
1973  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1974  frame->width, frame->height, frame->format,
1976  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1977  if (fr.num && fr.den)
1978  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1979 
1980  if ((ret = avfilter_graph_create_filter(&filt_src,
1981  avfilter_get_by_name("buffer"),
1982  "ffplay_buffer", buffersrc_args, NULL,
1983  graph)) < 0)
1984  goto fail;
1985 
1986  ret = avfilter_graph_create_filter(&filt_out,
1987  avfilter_get_by_name("buffersink"),
1988  "ffplay_buffersink", NULL, NULL, graph);
1989  if (ret < 0)
1990  goto fail;
1991 
1992  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1993  goto fail;
1994 
1995  last_filter = filt_out;
1996 
1997 /* Note: this macro adds a filter before the last added filter, so the
1998  * processing order of the filters is reversed */
1999 #define INSERT_FILT(name, arg) do { \
2000  AVFilterContext *filt_ctx; \
2001  \
2002  ret = avfilter_graph_create_filter(&filt_ctx, \
2003  avfilter_get_by_name(name), \
2004  "ffplay_" name, arg, NULL, graph); \
2005  if (ret < 0) \
2006  goto fail; \
2007  \
2008  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2009  if (ret < 0) \
2010  goto fail; \
2011  \
2012  last_filter = filt_ctx; \
2013 } while (0)
2014 
2015  /* The SDL YUV code does not handle odd width/height for some driver
2016  * combinations, therefore we crop the picture to an even width/height. */
2017  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
2018 
2019  if (autorotate) {
2020  double theta = get_rotation(is->video_st);
2021 
2022  if (fabs(theta - 90) < 1.0) {
2023  INSERT_FILT("transpose", "clock");
2024  } else if (fabs(theta - 180) < 1.0) {
2025  INSERT_FILT("hflip", NULL);
2026  INSERT_FILT("vflip", NULL);
2027  } else if (fabs(theta - 270) < 1.0) {
2028  INSERT_FILT("transpose", "cclock");
2029  } else if (fabs(theta) > 1.0) {
2030  char rotate_buf[64];
2031  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
2032  INSERT_FILT("rotate", rotate_buf);
2033  }
2034  }
2035 
2036  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2037  goto fail;
2038 
2039  is->in_video_filter = filt_src;
2040  is->out_video_filter = filt_out;
2041 
2042 fail:
2043  return ret;
2044 }
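Because INSERT_FILT links each new filter in front of the previous one, the resulting chain for a stream tagged with a 90° rotation and no user -vf option is equivalent to -vf transpose=clock,crop=floor(in_w/2)*2:floor(in_h/2)*2 between the buffer source and the buffersink; any user-supplied vfilters are parsed in front of that by configure_filtergraph().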
2045 
2046 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2047 {
2049  int sample_rates[2] = { 0, -1 };
2050  int64_t channel_layouts[2] = { 0, -1 };
2051  int channels[2] = { 0, -1 };
2052  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2053  char aresample_swr_opts[512] = "";
2054  AVDictionaryEntry *e = NULL;
2055  char asrc_args[256];
2056  int ret;
2057 
2058  avfilter_graph_free(&is->agraph);
2059  if (!(is->agraph = avfilter_graph_alloc()))
2060  return AVERROR(ENOMEM);
2061 
2062  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2063  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2064  if (strlen(aresample_swr_opts))
2065  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2066  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2067 
2068  ret = snprintf(asrc_args, sizeof(asrc_args),
2069  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2070  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2071  is->audio_filter_src.channels,
2072  1, is->audio_filter_src.freq);
2073  if (is->audio_filter_src.channel_layout)
2074  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2075  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2076 
2077  ret = avfilter_graph_create_filter(&filt_asrc,
2078  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2079  asrc_args, NULL, is->agraph);
2080  if (ret < 0)
2081  goto end;
2082 
2083 
2084  ret = avfilter_graph_create_filter(&filt_asink,
2085  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2086  NULL, NULL, is->agraph);
2087  if (ret < 0)
2088  goto end;
2089 
2090  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2091  goto end;
2092  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2093  goto end;
2094 
2095  if (force_output_format) {
2096  channel_layouts[0] = is->audio_tgt.channel_layout;
2097  channels [0] = is->audio_tgt.channels;
2098  sample_rates [0] = is->audio_tgt.freq;
2099  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2100  goto end;
2101  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2102  goto end;
2103  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2104  goto end;
2105  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2106  goto end;
2107  }
2108 
2109 
2110  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2111  goto end;
2112 
2113  is->in_audio_filter = filt_asrc;
2114  is->out_audio_filter = filt_asink;
2115 
2116 end:
2117  if (ret < 0)
2118  avfilter_graph_free(&is->agraph);
2119  return ret;
2120 }
2121 #endif /* CONFIG_AVFILTER */
2122 
2123 static int audio_thread(void *arg)
2124 {
2125  VideoState *is = arg;
2126  AVFrame *frame = av_frame_alloc();
2127  Frame *af;
2128 #if CONFIG_AVFILTER
2129  int last_serial = -1;
2130  int64_t dec_channel_layout;
2131  int reconfigure;
2132 #endif
2133  int got_frame = 0;
2134  AVRational tb;
2135  int ret = 0;
2136 
2137  if (!frame)
2138  return AVERROR(ENOMEM);
2139 
2140  do {
2141  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2142  goto the_end;
2143 
2144  if (got_frame) {
2145  tb = (AVRational){1, frame->sample_rate};
2146 
2147 #if CONFIG_AVFILTER
2148  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2149 
2150  reconfigure =
2151  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2152  frame->format, av_frame_get_channels(frame)) ||
2153  is->audio_filter_src.channel_layout != dec_channel_layout ||
2154  is->audio_filter_src.freq != frame->sample_rate ||
2155  is->auddec.pkt_serial != last_serial;
2156 
2157  if (reconfigure) {
2158  char buf1[1024], buf2[1024];
2159  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2160  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2161  av_log(NULL, AV_LOG_DEBUG,
2162  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2163  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2164  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2165 
2166  is->audio_filter_src.fmt = frame->format;
2167  is->audio_filter_src.channels = av_frame_get_channels(frame);
2168  is->audio_filter_src.channel_layout = dec_channel_layout;
2169  is->audio_filter_src.freq = frame->sample_rate;
2170  last_serial = is->auddec.pkt_serial;
2171 
2172  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2173  goto the_end;
2174  }
2175 
2176  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2177  goto the_end;
2178 
2179  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2180  tb = is->out_audio_filter->inputs[0]->time_base;
2181 #endif
2182  if (!(af = frame_queue_peek_writable(&is->sampq)))
2183  goto the_end;
2184 
2185  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2186  af->pos = av_frame_get_pkt_pos(frame);
2187  af->serial = is->auddec.pkt_serial;
2188  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2189 
2190  av_frame_move_ref(af->frame, frame);
2191  frame_queue_push(&is->sampq);
2192 
2193 #if CONFIG_AVFILTER
2194  if (is->audioq.serial != is->auddec.pkt_serial)
2195  break;
2196  }
2197  if (ret == AVERROR_EOF)
2198  is->auddec.finished = is->auddec.pkt_serial;
2199 #endif
2200  }
2201  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2202  the_end:
2203 #if CONFIG_AVFILTER
2204  avfilter_graph_free(&is->agraph);
2205 #endif
2206  av_frame_free(&frame);
2207  return ret;
2208 }
2209 
2210 static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2211 {
2212  packet_queue_start(d->queue);
2213  d->decoder_tid = SDL_CreateThread(fn, arg);
2214 }
2215 
2216 static int video_thread(void *arg)
2217 {
2218  VideoState *is = arg;
2219  AVFrame *frame = av_frame_alloc();
2220  double pts;
2221  double duration;
2222  int ret;
2223  AVRational tb = is->video_st->time_base;
2224  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2225 
2226 #if CONFIG_AVFILTER
2227  AVFilterGraph *graph = avfilter_graph_alloc();
2228  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2229  int last_w = 0;
2230  int last_h = 0;
2231  enum AVPixelFormat last_format = -2;
2232  int last_serial = -1;
2233  int last_vfilter_idx = 0;
2234  if (!graph) {
2235  av_frame_free(&frame);
2236  return AVERROR(ENOMEM);
2237  }
2238 
2239 #endif
2240 
2241  if (!frame) {
2242 #if CONFIG_AVFILTER
2243  avfilter_graph_free(&graph);
2244 #endif
2245  return AVERROR(ENOMEM);
2246  }
2247 
2248  for (;;) {
2249  ret = get_video_frame(is, frame);
2250  if (ret < 0)
2251  goto the_end;
2252  if (!ret)
2253  continue;
2254 
2255 #if CONFIG_AVFILTER
2256  if ( last_w != frame->width
2257  || last_h != frame->height
2258  || last_format != frame->format
2259  || last_serial != is->viddec.pkt_serial
2260  || last_vfilter_idx != is->vfilter_idx) {
2261  av_log(NULL, AV_LOG_DEBUG,
2262  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2263  last_w, last_h,
2264  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2265  frame->width, frame->height,
2266  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2267  avfilter_graph_free(&graph);
2268  graph = avfilter_graph_alloc();
2269  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2270  SDL_Event event;
2271  event.type = FF_QUIT_EVENT;
2272  event.user.data1 = is;
2273  SDL_PushEvent(&event);
2274  goto the_end;
2275  }
2276  filt_in = is->in_video_filter;
2277  filt_out = is->out_video_filter;
2278  last_w = frame->width;
2279  last_h = frame->height;
2280  last_format = frame->format;
2281  last_serial = is->viddec.pkt_serial;
2282  last_vfilter_idx = is->vfilter_idx;
2283  frame_rate = filt_out->inputs[0]->frame_rate;
2284  }
2285 
2286  ret = av_buffersrc_add_frame(filt_in, frame);
2287  if (ret < 0)
2288  goto the_end;
2289 
2290  while (ret >= 0) {
2291  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2292 
2293  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2294  if (ret < 0) {
2295  if (ret == AVERROR_EOF)
2296  is->viddec.finished = is->viddec.pkt_serial;
2297  ret = 0;
2298  break;
2299  }
2300 
2301  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2302  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2303  is->frame_last_filter_delay = 0;
2304  tb = filt_out->inputs[0]->time_base;
2305 #endif
2306  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2307  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2308  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2309  av_frame_unref(frame);
2310 #if CONFIG_AVFILTER
2311  }
2312 #endif
2313 
2314  if (ret < 0)
2315  goto the_end;
2316  }
2317  the_end:
2318 #if CONFIG_AVFILTER
2319  avfilter_graph_free(&graph);
2320 #endif
2321  av_frame_free(&frame);
2322  return 0;
2323 }
2324 
2325 static int subtitle_thread(void *arg)
2326 {
2327  VideoState *is = arg;
2328  Frame *sp;
2329  int got_subtitle;
2330  double pts;
2331  int i, j;
2332  int r, g, b, y, u, v, a;
2333 
2334  for (;;) {
2335  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2336  return 0;
2337 
2338  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2339  break;
2340 
2341  pts = 0;
2342 
2343  if (got_subtitle && sp->sub.format == 0) {
2344  if (sp->sub.pts != AV_NOPTS_VALUE)
2345  pts = sp->sub.pts / (double)AV_TIME_BASE;
2346  sp->pts = pts;
2347  sp->serial = is->subdec.pkt_serial;
2348 
2349  for (i = 0; i < sp->sub.num_rects; i++)
2350  {
2351  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2352  {
2353  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2354  y = RGB_TO_Y_CCIR(r, g, b);
2355  u = RGB_TO_U_CCIR(r, g, b, 0);
2356  v = RGB_TO_V_CCIR(r, g, b, 0);
2357  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2358  }
2359  }
2360 
2361  /* now we can update the picture count */
2362  frame_queue_push(&is->subpq);
2363  } else if (got_subtitle) {
2364  avsubtitle_free(&sp->sub);
2365  }
2366  }
2367  return 0;
2368 }
2369 
2370 /* copy samples for viewing in editor window */
2371 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2372 {
2373  int size, len;
2374 
2375  size = samples_size / sizeof(short);
2376  while (size > 0) {
2377  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2378  if (len > size)
2379  len = size;
2380  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2381  samples += len;
2382  is->sample_array_index += len;
2383  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2384  is->sample_array_index = 0;
2385  size -= len;
2386  }
2387 }
2388 
2389 /* return the wanted number of samples to get better sync if sync_type is video
2390  * or external master clock */
2391 static int synchronize_audio(VideoState *is, int nb_samples)
2392 {
2393  int wanted_nb_samples = nb_samples;
2394 
2395  /* if not master, then we try to remove or add samples to correct the clock */
2396  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2397  double diff, avg_diff;
2398  int min_nb_samples, max_nb_samples;
2399 
2400  diff = get_clock(&is->audclk) - get_master_clock(is);
2401 
2402  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2403  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2404  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2405  /* not enough measures to have a correct estimate */
2406  is->audio_diff_avg_count++;
2407  } else {
2408  /* estimate the A-V difference */
2409  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2410 
2411  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2412  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2413  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2414  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2415  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2416  }
2417  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2418  diff, avg_diff, wanted_nb_samples - nb_samples,
2419  is->audio_clock, is->audio_diff_threshold);
2420  }
2421  } else {
2422  /* too big difference : may be initial PTS errors, so
2423  reset A-V filter */
2424  is->audio_diff_avg_count = 0;
2425  is->audio_diff_cum = 0;
2426  }
2427  }
2428 
2429  return wanted_nb_samples;
2430 }
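/* Worked example (annotation, values assumed for illustration): with
 * AUDIO_DIFF_AVG_NB = 20 the coefficient is exp(log(0.01)/20) ~ 0.794, so
 * avg_diff = audio_diff_cum * (1 - 0.794) behaves like a moving average over
 * roughly the last 20 A-V differences.  If the current diff is 0.05 s at
 * audio_src.freq = 48000 and the frame carries nb_samples = 1024, the raw
 * request is 1024 + 0.05 * 48000 = 3424 samples, which av_clip() limits to
 * max_nb_samples = 1024 * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100 = 1126,
 * i.e. at most a 10% stretch per frame. */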
2431 
2432 /**
2433  * Decode one audio frame and return its uncompressed size.
2434  *
2435  * The processed audio frame is decoded, converted if required, and
2436  * stored in is->audio_buf, with size in bytes given by the return
2437  * value.
2438  */
2439 static int audio_decode_frame(VideoState *is)
2440 {
2441  int data_size, resampled_data_size;
2442  int64_t dec_channel_layout;
2443  av_unused double audio_clock0;
2444  int wanted_nb_samples;
2445  Frame *af;
2446 
2447  if (is->paused)
2448  return -1;
2449 
2450  do {
2451  if (!(af = frame_queue_peek_readable(&is->sampq)))
2452  return -1;
2453  frame_queue_next(&is->sampq);
2454  } while (af->serial != is->audioq.serial);
2455 
2456  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2457  af->frame->nb_samples,
2458  af->frame->format, 1);
2459 
2460  dec_channel_layout =
2461  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2462  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2463  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2464 
2465  if (af->frame->format != is->audio_src.fmt ||
2466  dec_channel_layout != is->audio_src.channel_layout ||
2467  af->frame->sample_rate != is->audio_src.freq ||
2468  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2469  swr_free(&is->swr_ctx);
2470  is->swr_ctx = swr_alloc_set_opts(NULL,
2471  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2472  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2473  0, NULL);
2474  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2475  av_log(NULL, AV_LOG_ERROR,
2476  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2477  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2478  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2479  swr_free(&is->swr_ctx);
2480  return -1;
2481  }
2482  is->audio_src.channel_layout = dec_channel_layout;
2483  is->audio_src.channels = av_frame_get_channels(af->frame);
2484  is->audio_src.freq = af->frame->sample_rate;
2485  is->audio_src.fmt = af->frame->format;
2486  }
2487 
2488  if (is->swr_ctx) {
2489  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2490  uint8_t **out = &is->audio_buf1;
2491  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2492  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2493  int len2;
2494  if (out_size < 0) {
2495  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2496  return -1;
2497  }
2498  if (wanted_nb_samples != af->frame->nb_samples) {
2499  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2500  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2501  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2502  return -1;
2503  }
2504  }
2505  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2506  if (!is->audio_buf1)
2507  return AVERROR(ENOMEM);
2508  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2509  if (len2 < 0) {
2510  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2511  return -1;
2512  }
2513  if (len2 == out_count) {
2514  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2515  if (swr_init(is->swr_ctx) < 0)
2516  swr_free(&is->swr_ctx);
2517  }
2518  is->audio_buf = is->audio_buf1;
2519  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2520  } else {
2521  is->audio_buf = af->frame->data[0];
2522  resampled_data_size = data_size;
2523  }
2524 
2525  audio_clock0 = is->audio_clock;
2526  /* update the audio clock with the pts */
2527  if (!isnan(af->pts))
2528  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2529  else
2530  is->audio_clock = NAN;
2531  is->audio_clock_serial = af->serial;
2532 #ifdef DEBUG
2533  {
2534  static double last_clock;
2535  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2536  is->audio_clock - last_clock,
2537  is->audio_clock, audio_clock0);
2538  last_clock = is->audio_clock;
2539  }
2540 #endif
2541  return resampled_data_size;
2542 }
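/* Worked example (annotation, values assumed for illustration): resampling a
 * 1024-sample stereo frame from 44100 Hz to an S16 48000 Hz target gives
 *   out_count = 1024 * 48000 / 44100 + 256 = 1114 + 256 = 1370 samples
 *   out_size  = 1370 samples * 2 channels * 2 bytes = 5480 bytes
 * and the returned resampled_data_size is len2 (the samples actually written
 * by swr_convert()) * 2 channels * 2 bytes. */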
2543 
2544 /* prepare a new audio buffer */
2545 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2546 {
2547  VideoState *is = opaque;
2548  int audio_size, len1;
2549 
2550  audio_callback_time = av_gettime_relative();
2551 
2552  while (len > 0) {
2553  if (is->audio_buf_index >= is->audio_buf_size) {
2554  audio_size = audio_decode_frame(is);
2555  if (audio_size < 0) {
2556  /* if error, just output silence */
2557  is->audio_buf = is->silence_buf;
2558  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2559  } else {
2560  if (is->show_mode != SHOW_MODE_VIDEO)
2561  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2562  is->audio_buf_size = audio_size;
2563  }
2564  is->audio_buf_index = 0;
2565  }
2566  len1 = is->audio_buf_size - is->audio_buf_index;
2567  if (len1 > len)
2568  len1 = len;
2569  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2570  len -= len1;
2571  stream += len1;
2572  is->audio_buf_index += len1;
2573  }
2574  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2575  /* Let's assume the audio driver that is used by SDL has two periods. */
2576  if (!isnan(is->audio_clock)) {
2577  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2578  sync_clock_to_slave(&is->extclk, &is->audclk);
2579  }
2580 }
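/* Worked example (annotation, values assumed for illustration): the clock set
 * above is the pts at the end of the last decoded frame minus the audio that
 * is still buffered.  With audio_hw_buf_size = 8192 bytes, audio_write_buf_size
 * = 4096 bytes and a 48000 Hz stereo S16 target (bytes_per_sec = 192000), the
 * assumed two SDL periods plus the unwritten tail amount to
 *   (2 * 8192 + 4096) / 192000 ~ 0.107 s
 * so the audio clock is set about 107 ms behind is->audio_clock. */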
2581 
2582 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2583 {
2584  SDL_AudioSpec wanted_spec, spec;
2585  const char *env;
2586  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2587  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2588  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2589 
2590  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2591  if (env) {
2592  wanted_nb_channels = atoi(env);
2593  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2594  }
2595  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2596  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2597  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2598  }
2599  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2600  wanted_spec.channels = wanted_nb_channels;
2601  wanted_spec.freq = wanted_sample_rate;
2602  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2603  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2604  return -1;
2605  }
2606  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2607  next_sample_rate_idx--;
2608  wanted_spec.format = AUDIO_S16SYS;
2609  wanted_spec.silence = 0;
2610  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2611  wanted_spec.callback = sdl_audio_callback;
2612  wanted_spec.userdata = opaque;
2613  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2614  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2615  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2616  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2617  if (!wanted_spec.channels) {
2618  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2619  wanted_spec.channels = wanted_nb_channels;
2620  if (!wanted_spec.freq) {
2621  av_log(NULL, AV_LOG_ERROR,
2622  "No more combinations to try, audio open failed\n");
2623  return -1;
2624  }
2625  }
2626  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2627  }
2628  if (spec.format != AUDIO_S16SYS) {
2629  av_log(NULL, AV_LOG_ERROR,
2630  "SDL advised audio format %d is not supported!\n", spec.format);
2631  return -1;
2632  }
2633  if (spec.channels != wanted_spec.channels) {
2634  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2635  if (!wanted_channel_layout) {
2636  av_log(NULL, AV_LOG_ERROR,
2637  "SDL advised channel count %d is not supported!\n", spec.channels);
2638  return -1;
2639  }
2640  }
2641 
2642  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2643  audio_hw_params->freq = spec.freq;
2644  audio_hw_params->channel_layout = wanted_channel_layout;
2645  audio_hw_params->channels = spec.channels;
2646  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2647  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2648  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2649  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2650  return -1;
2651  }
2652  return spec.size;
2653 }
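/* Annotation: a minimal sketch (not part of the original source) of the SDL
 * buffer sizing rule used above; it only assumes the two macros defined at the
 * top of this file and libavutil's av_log2()/FFMAX. */
#if 0
static int example_sdl_buffer_samples(int freq)
{
    /* 2 << av_log2(n) is the first power of two strictly greater than n, so
     * the callback size is freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC rounded up
     * to a power of two, but never below SDL_AUDIO_MIN_BUFFER_SIZE. */
    return FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
                 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
}
/* e.g. freq = 48000: 48000 / 30 = 1600, av_log2(1600) = 10, 2 << 10 = 2048
 * samples per callback, i.e. ~42.7 ms of audio and ~23 callbacks per second. */
#endif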
2654 
2655 /* open a given stream. Return 0 if OK */
2656 static int stream_component_open(VideoState *is, int stream_index)
2657 {
2658  AVFormatContext *ic = is->ic;
2659  AVCodecContext *avctx;
2660  AVCodec *codec;
2661  const char *forced_codec_name = NULL;
2662  AVDictionary *opts;
2663  AVDictionaryEntry *t = NULL;
2664  int sample_rate, nb_channels;
2665  int64_t channel_layout;
2666  int ret = 0;
2667  int stream_lowres = lowres;
2668 
2669  if (stream_index < 0 || stream_index >= ic->nb_streams)
2670  return -1;
2671  avctx = ic->streams[stream_index]->codec;
2672 
2673  codec = avcodec_find_decoder(avctx->codec_id);
2674 
2675  switch(avctx->codec_type){
2676  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2677  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2678  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2679  }
2680  if (forced_codec_name)
2681  codec = avcodec_find_decoder_by_name(forced_codec_name);
2682  if (!codec) {
2683  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2684  "No codec could be found with name '%s'\n", forced_codec_name);
2685  else av_log(NULL, AV_LOG_WARNING,
2686  "No codec could be found with id %d\n", avctx->codec_id);
2687  return -1;
2688  }
2689 
2690  avctx->codec_id = codec->id;
2691  if(stream_lowres > av_codec_get_max_lowres(codec)){
2692  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2693  av_codec_get_max_lowres(codec));
2694  stream_lowres = av_codec_get_max_lowres(codec);
2695  }
2696  av_codec_set_lowres(avctx, stream_lowres);
2697 
2698  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2699  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2700  if(codec->capabilities & CODEC_CAP_DR1)
2701  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2702 
2703  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2704  if (!av_dict_get(opts, "threads", NULL, 0))
2705  av_dict_set(&opts, "threads", "auto", 0);
2706  if (stream_lowres)
2707  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2708  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2709  av_dict_set(&opts, "refcounted_frames", "1", 0);
2710  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2711  goto fail;
2712  }
2713  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2714  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2715  ret = AVERROR_OPTION_NOT_FOUND;
2716  goto fail;
2717  }
2718 
2719  is->eof = 0;
2720  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2721  switch (avctx->codec_type) {
2722  case AVMEDIA_TYPE_AUDIO:
2723 #if CONFIG_AVFILTER
2724  {
2725  AVFilterLink *link;
2726 
2727  is->audio_filter_src.freq = avctx->sample_rate;
2728  is->audio_filter_src.channels = avctx->channels;
2729  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2730  is->audio_filter_src.fmt = avctx->sample_fmt;
2731  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2732  goto fail;
2733  link = is->out_audio_filter->inputs[0];
2734  sample_rate = link->sample_rate;
2735  nb_channels = link->channels;
2736  channel_layout = link->channel_layout;
2737  }
2738 #else
2739  sample_rate = avctx->sample_rate;
2740  nb_channels = avctx->channels;
2741  channel_layout = avctx->channel_layout;
2742 #endif
2743 
2744  /* prepare audio output */
2745  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2746  goto fail;
2747  is->audio_hw_buf_size = ret;
2748  is->audio_src = is->audio_tgt;
2749  is->audio_buf_size = 0;
2750  is->audio_buf_index = 0;
2751 
2752  /* init averaging filter */
2753  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2754  is->audio_diff_avg_count = 0;
2755  /* since we do not have a precise enough audio fifo fullness,
2756  we correct audio sync only if larger than this threshold */
2757  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2758 
2759  is->audio_stream = stream_index;
2760  is->audio_st = ic->streams[stream_index];
2761 
2762  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2763  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2764  is->auddec.start_pts = is->audio_st->start_time;
2765  is->auddec.start_pts_tb = is->audio_st->time_base;
2766  }
2767  decoder_start(&is->auddec, audio_thread, is);
2768  SDL_PauseAudio(0);
2769  break;
2770  case AVMEDIA_TYPE_VIDEO:
2771  is->video_stream = stream_index;
2772  is->video_st = ic->streams[stream_index];
2773 
2774  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2775  decoder_start(&is->viddec, video_thread, is);
2776  is->queue_attachments_req = 1;
2777  break;
2778  case AVMEDIA_TYPE_SUBTITLE:
2779  is->subtitle_stream = stream_index;
2780  is->subtitle_st = ic->streams[stream_index];
2781 
2782  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2783  decoder_start(&is->subdec, subtitle_thread, is);
2784  break;
2785  default:
2786  break;
2787  }
2788 
2789 fail:
2790  av_dict_free(&opts);
2791 
2792  return ret;
2793 }
2794 
2795 static void stream_component_close(VideoState *is, int stream_index)
2796 {
2797  AVFormatContext *ic = is->ic;
2798  AVCodecContext *avctx;
2799 
2800  if (stream_index < 0 || stream_index >= ic->nb_streams)
2801  return;
2802  avctx = ic->streams[stream_index]->codec;
2803 
2804  switch (avctx->codec_type) {
2805  case AVMEDIA_TYPE_AUDIO:
2806  decoder_abort(&is->auddec, &is->sampq);
2807  SDL_CloseAudio();
2808  decoder_destroy(&is->auddec);
2809  swr_free(&is->swr_ctx);
2810  av_freep(&is->audio_buf1);
2811  is->audio_buf1_size = 0;
2812  is->audio_buf = NULL;
2813 
2814  if (is->rdft) {
2815  av_rdft_end(is->rdft);
2816  av_freep(&is->rdft_data);
2817  is->rdft = NULL;
2818  is->rdft_bits = 0;
2819  }
2820  break;
2821  case AVMEDIA_TYPE_VIDEO:
2822  decoder_abort(&is->viddec, &is->pictq);
2823  decoder_destroy(&is->viddec);
2824  break;
2825  case AVMEDIA_TYPE_SUBTITLE:
2826  decoder_abort(&is->subdec, &is->subpq);
2827  decoder_destroy(&is->subdec);
2828  break;
2829  default:
2830  break;
2831  }
2832 
2833  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2834  avcodec_close(avctx);
2835  switch (avctx->codec_type) {
2836  case AVMEDIA_TYPE_AUDIO:
2837  is->audio_st = NULL;
2838  is->audio_stream = -1;
2839  break;
2840  case AVMEDIA_TYPE_VIDEO:
2841  is->video_st = NULL;
2842  is->video_stream = -1;
2843  break;
2844  case AVMEDIA_TYPE_SUBTITLE:
2845  is->subtitle_st = NULL;
2846  is->subtitle_stream = -1;
2847  break;
2848  default:
2849  break;
2850  }
2851 }
2852 
2853 static int decode_interrupt_cb(void *ctx)
2854 {
2855  VideoState *is = ctx;
2856  return is->abort_request;
2857 }
2858 
2859 static int is_realtime(AVFormatContext *s)
2860 {
2861  if( !strcmp(s->iformat->name, "rtp")
2862  || !strcmp(s->iformat->name, "rtsp")
2863  || !strcmp(s->iformat->name, "sdp")
2864  )
2865  return 1;
2866 
2867  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2868  || !strncmp(s->filename, "udp:", 4)
2869  )
2870  )
2871  return 1;
2872  return 0;
2873 }
2874 
2875 /* this thread gets the stream from the disk or the network */
2876 static int read_thread(void *arg)
2877 {
2878  VideoState *is = arg;
2879  AVFormatContext *ic = NULL;
2880  int err, i, ret;
2881  int st_index[AVMEDIA_TYPE_NB];
2882  AVPacket pkt1, *pkt = &pkt1;
2883  int64_t stream_start_time;
2884  int pkt_in_play_range = 0;
2885  AVDictionaryEntry *t;
2886  AVDictionary **opts;
2887  int orig_nb_streams;
2888  SDL_mutex *wait_mutex = SDL_CreateMutex();
2889  int scan_all_pmts_set = 0;
2890  int64_t pkt_ts;
2891 
2892  memset(st_index, -1, sizeof(st_index));
2893  is->last_video_stream = is->video_stream = -1;
2894  is->last_audio_stream = is->audio_stream = -1;
2895  is->last_subtitle_stream = is->subtitle_stream = -1;
2896  is->eof = 0;
2897 
2898  ic = avformat_alloc_context();
2899  if (!ic) {
2900  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2901  ret = AVERROR(ENOMEM);
2902  goto fail;
2903  }
2904  ic->interrupt_callback.callback = decode_interrupt_cb;
2905  ic->interrupt_callback.opaque = is;
2906  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2907  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2908  scan_all_pmts_set = 1;
2909  }
2910  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2911  if (err < 0) {
2912  print_error(is->filename, err);
2913  ret = -1;
2914  goto fail;
2915  }
2916  if (scan_all_pmts_set)
2917  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2918 
2919  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2920  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2921  ret = AVERROR_OPTION_NOT_FOUND;
2922  goto fail;
2923  }
2924  is->ic = ic;
2925 
2926  if (genpts)
2927  ic->flags |= AVFMT_FLAG_GENPTS;
2928 
2929  av_format_inject_global_side_data(ic);
2930 
2931  opts = setup_find_stream_info_opts(ic, codec_opts);
2932  orig_nb_streams = ic->nb_streams;
2933 
2934  err = avformat_find_stream_info(ic, opts);
2935 
2936  for (i = 0; i < orig_nb_streams; i++)
2937  av_dict_free(&opts[i]);
2938  av_freep(&opts);
2939 
2940  if (err < 0) {
2941  av_log(NULL, AV_LOG_WARNING,
2942  "%s: could not find codec parameters\n", is->filename);
2943  ret = -1;
2944  goto fail;
2945  }
2946 
2947  if (ic->pb)
2948  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2949 
2950  if (seek_by_bytes < 0)
2951  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2952 
2953  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2954 
2955  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2956  window_title = av_asprintf("%s - %s", t->value, input_filename);
2957 
2958  /* if seeking requested, we execute it */
2959  if (start_time != AV_NOPTS_VALUE) {
2960  int64_t timestamp;
2961 
2962  timestamp = start_time;
2963  /* add the stream start time */
2964  if (ic->start_time != AV_NOPTS_VALUE)
2965  timestamp += ic->start_time;
2966  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2967  if (ret < 0) {
2968  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2969  is->filename, (double)timestamp / AV_TIME_BASE);
2970  }
2971  }
2972 
2973  is->realtime = is_realtime(ic);
2974 
2975  if (show_status)
2976  av_dump_format(ic, 0, is->filename, 0);
2977 
2978  for (i = 0; i < ic->nb_streams; i++) {
2979  AVStream *st = ic->streams[i];
2980  enum AVMediaType type = st->codec->codec_type;
2981  st->discard = AVDISCARD_ALL;
2982  if (wanted_stream_spec[type] && st_index[type] == -1)
2983  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2984  st_index[type] = i;
2985  }
2986  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2987  if (wanted_stream_spec[i] && st_index[i] == -1) {
2988  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2989  st_index[i] = INT_MAX;
2990  }
2991  }
2992 
2993  if (!video_disable)
2994  st_index[AVMEDIA_TYPE_VIDEO] =
2995  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2996  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2997  if (!audio_disable)
2998  st_index[AVMEDIA_TYPE_AUDIO] =
2999  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
3000  st_index[AVMEDIA_TYPE_AUDIO],
3001  st_index[AVMEDIA_TYPE_VIDEO],
3002  NULL, 0);
3003  if (!video_disable && !subtitle_disable)
3004  st_index[AVMEDIA_TYPE_SUBTITLE] =
3005  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
3006  st_index[AVMEDIA_TYPE_SUBTITLE],
3007  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
3008  st_index[AVMEDIA_TYPE_AUDIO] :
3009  st_index[AVMEDIA_TYPE_VIDEO]),
3010  NULL, 0);
3011 
3012  is->show_mode = show_mode;
3013  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3014  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
3015  AVCodecContext *avctx = st->codec;
3016  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3017  if (avctx->width)
3018  set_default_window_size(avctx->width, avctx->height, sar);
3019  }
3020 
3021  /* open the streams */
3022  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3023  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3024  }
3025 
3026  ret = -1;
3027  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3028  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3029  }
3030  if (is->show_mode == SHOW_MODE_NONE)
3031  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3032 
3033  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3034  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3035  }
3036 
3037  if (is->video_stream < 0 && is->audio_stream < 0) {
3038  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3039  is->filename);
3040  ret = -1;
3041  goto fail;
3042  }
3043 
3044  if (infinite_buffer < 0 && is->realtime)
3045  infinite_buffer = 1;
3046 
3047  for (;;) {
3048  if (is->abort_request)
3049  break;
3050  if (is->paused != is->last_paused) {
3051  is->last_paused = is->paused;
3052  if (is->paused)
3053  is->read_pause_return = av_read_pause(ic);
3054  else
3055  av_read_play(ic);
3056  }
3057 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3058  if (is->paused &&
3059  (!strcmp(ic->iformat->name, "rtsp") ||
3060  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3061  /* wait 10 ms to avoid trying to get another packet */
3062  /* XXX: horrible */
3063  SDL_Delay(10);
3064  continue;
3065  }
3066 #endif
3067  if (is->seek_req) {
3068  int64_t seek_target = is->seek_pos;
3069  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3070  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3071 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3072 // of the seek_pos/seek_rel variables
3073 
3074  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3075  if (ret < 0) {
3076  av_log(NULL, AV_LOG_ERROR,
3077  "%s: error while seeking\n", is->ic->filename);
3078  } else {
3079  if (is->audio_stream >= 0) {
3080  packet_queue_flush(&is->audioq);
3081  packet_queue_put(&is->audioq, &flush_pkt);
3082  }
3083  if (is->subtitle_stream >= 0) {
3084  packet_queue_flush(&is->subtitleq);
3085  packet_queue_put(&is->subtitleq, &flush_pkt);
3086  }
3087  if (is->video_stream >= 0) {
3088  packet_queue_flush(&is->videoq);
3089  packet_queue_put(&is->videoq, &flush_pkt);
3090  }
3091  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3092  set_clock(&is->extclk, NAN, 0);
3093  } else {
3094  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3095  }
3096  }
3097  is->seek_req = 0;
3098  is->queue_attachments_req = 1;
3099  is->eof = 0;
3100  if (is->paused)
3101  step_to_next_frame(is);
3102  }
3103  if (is->queue_attachments_req) {
3104  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3105  AVPacket copy;
3106  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3107  goto fail;
3108  packet_queue_put(&is->videoq, &copy);
3109  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3110  }
3111  is->queue_attachments_req = 0;
3112  }
3113 
3114  /* if the queue are full, no need to read more */
3115  if (infinite_buffer<1 &&
3116  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3117  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3118  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3119  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3120  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3121  /* wait 10 ms */
3122  SDL_LockMutex(wait_mutex);
3123  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3124  SDL_UnlockMutex(wait_mutex);
3125  continue;
3126  }
3127  if (!is->paused &&
3128  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3129  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3130  if (loop != 1 && (!loop || --loop)) {
3131  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3132  } else if (autoexit) {
3133  ret = AVERROR_EOF;
3134  goto fail;
3135  }
3136  }
3137  ret = av_read_frame(ic, pkt);
3138  if (ret < 0) {
3139  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3140  if (is->video_stream >= 0)
3141  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3142  if (is->audio_stream >= 0)
3143  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3144  if (is->subtitle_stream >= 0)
3145  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3146  is->eof = 1;
3147  }
3148  if (ic->pb && ic->pb->error)
3149  break;
3150  SDL_LockMutex(wait_mutex);
3151  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3152  SDL_UnlockMutex(wait_mutex);
3153  continue;
3154  } else {
3155  is->eof = 0;
3156  }
3157  /* check if packet is in play range specified by user, then queue, otherwise discard */
3158  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3159  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3160  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3161  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3162  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3163  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3164  <= ((double)duration / 1000000);
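  /* Annotation (illustrative, values assumed): with -ss 10 -t 5, start_time is
   * 10*AV_TIME_BASE and duration is 5*AV_TIME_BASE.  A packet whose pts maps
   * to 14 s from the stream start gives 14 - 10 = 4 <= 5, so it is queued
   * below, while one at 16 s gives 6 > 5 and is discarded. */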
3165  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3166  packet_queue_put(&is->audioq, pkt);
3167  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3168  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3169  packet_queue_put(&is->videoq, pkt);
3170  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3171  packet_queue_put(&is->subtitleq, pkt);
3172  } else {
3173  av_free_packet(pkt);
3174  }
3175  }
3176  /* wait until the end */
3177  while (!is->abort_request) {
3178  SDL_Delay(100);
3179  }
3180 
3181  ret = 0;
3182  fail:
3183  /* close each stream */
3184  if (is->audio_stream >= 0)
3185  stream_component_close(is, is->audio_stream);
3186  if (is->video_stream >= 0)
3187  stream_component_close(is, is->video_stream);
3188  if (is->subtitle_stream >= 0)
3189  stream_component_close(is, is->subtitle_stream);
3190  if (ic) {
3191  avformat_close_input(&ic);
3192  is->ic = NULL;
3193  }
3194 
3195  if (ret != 0) {
3196  SDL_Event event;
3197 
3198  event.type = FF_QUIT_EVENT;
3199  event.user.data1 = is;
3200  SDL_PushEvent(&event);
3201  }
3202  SDL_DestroyMutex(wait_mutex);
3203  return 0;
3204 }
3205 
3206 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3207 {
3208  VideoState *is;
3209 
3210  is = av_mallocz(sizeof(VideoState));
3211  if (!is)
3212  return NULL;
3213  av_strlcpy(is->filename, filename, sizeof(is->filename));
3214  is->iformat = iformat;
3215  is->ytop = 0;
3216  is->xleft = 0;
3217 
3218  /* start video display */
3219  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3220  goto fail;
3221  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3222  goto fail;
3223  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3224  goto fail;
3225 
3226  packet_queue_init(&is->videoq);
3227  packet_queue_init(&is->audioq);
3228  packet_queue_init(&is->subtitleq);
3229 
3230  is->continue_read_thread = SDL_CreateCond();
3231 
3232  init_clock(&is->vidclk, &is->videoq.serial);
3233  init_clock(&is->audclk, &is->audioq.serial);
3234  init_clock(&is->extclk, &is->extclk.serial);
3235  is->audio_clock_serial = -1;
3236  is->av_sync_type = av_sync_type;
3237  is->read_tid = SDL_CreateThread(read_thread, is);
3238  if (!is->read_tid) {
3239 fail:
3240  stream_close(is);
3241  return NULL;
3242  }
3243  return is;
3244 }
3245 
3246 static void stream_cycle_channel(VideoState *is, int codec_type)
3247 {
3248  AVFormatContext *ic = is->ic;
3249  int start_index, stream_index;
3250  int old_index;
3251  AVStream *st;
3252  AVProgram *p = NULL;
3253  int nb_streams = is->ic->nb_streams;
3254 
3255  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3256  start_index = is->last_video_stream;
3257  old_index = is->video_stream;
3258  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3259  start_index = is->last_audio_stream;
3260  old_index = is->audio_stream;
3261  } else {
3262  start_index = is->last_subtitle_stream;
3263  old_index = is->subtitle_stream;
3264  }
3265  stream_index = start_index;
3266 
3267  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3268  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3269  if (p) {
3270  nb_streams = p->nb_stream_indexes;
3271  for (start_index = 0; start_index < nb_streams; start_index++)
3272  if (p->stream_index[start_index] == stream_index)
3273  break;
3274  if (start_index == nb_streams)
3275  start_index = -1;
3276  stream_index = start_index;
3277  }
3278  }
3279 
3280  for (;;) {
3281  if (++stream_index >= nb_streams)
3282  {
3283  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3284  {
3285  stream_index = -1;
3286  is->last_subtitle_stream = -1;
3287  goto the_end;
3288  }
3289  if (start_index == -1)
3290  return;
3291  stream_index = 0;
3292  }
3293  if (stream_index == start_index)
3294  return;
3295  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3296  if (st->codec->codec_type == codec_type) {
3297  /* check that parameters are OK */
3298  switch (codec_type) {
3299  case AVMEDIA_TYPE_AUDIO:
3300  if (st->codec->sample_rate != 0 &&
3301  st->codec->channels != 0)
3302  goto the_end;
3303  break;
3304  case AVMEDIA_TYPE_VIDEO:
3305  case AVMEDIA_TYPE_SUBTITLE:
3306  goto the_end;
3307  default:
3308  break;
3309  }
3310  }
3311  }
3312  the_end:
3313  if (p && stream_index != -1)
3314  stream_index = p->stream_index[stream_index];
3315  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3316  av_get_media_type_string(codec_type),
3317  old_index,
3318  stream_index);
3319 
3320  stream_component_close(is, old_index);
3321  stream_component_open(is, stream_index);
3322 }
3323 
3324 
3325 static void toggle_full_screen(VideoState *is)
3326 {
3327 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3328  /* OS X needs to reallocate the SDL overlays */
3329  int i;
3330  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3331  is->pictq.queue[i].reallocate = 1;
3332 #endif
3333  is_full_screen = !is_full_screen;
3334  video_open(is, 1, NULL);
3335 }
3336 
3337 static void toggle_audio_display(VideoState *is)
3338 {
3339  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3340  int next = is->show_mode;
3341  do {
3342  next = (next + 1) % SHOW_MODE_NB;
3343  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3344  if (is->show_mode != next) {
3345  fill_rectangle(screen,
3346  is->xleft, is->ytop, is->width, is->height,
3347  bgcolor, 1);
3348  is->force_refresh = 1;
3349  is->show_mode = next;
3350  }
3351 }
3352 
3353 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3354  double remaining_time = 0.0;
3355  SDL_PumpEvents();
3356  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3357  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3358  SDL_ShowCursor(0);
3359  cursor_hidden = 1;
3360  }
3361  if (remaining_time > 0.0)
3362  av_usleep((int64_t)(remaining_time * 1000000.0));
3363  remaining_time = REFRESH_RATE;
3364  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3365  video_refresh(is, &remaining_time);
3366  SDL_PumpEvents();
3367  }
3368 }
3369 
3370 static void seek_chapter(VideoState *is, int incr)
3371 {
3372  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3373  int i;
3374 
3375  if (!is->ic->nb_chapters)
3376  return;
3377 
3378  /* find the current chapter */
3379  for (i = 0; i < is->ic->nb_chapters; i++) {
3380  AVChapter *ch = is->ic->chapters[i];
3381  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3382  i--;
3383  break;
3384  }
3385  }
3386 
3387  i += incr;
3388  i = FFMAX(i, 0);
3389  if (i >= is->ic->nb_chapters)
3390  return;
3391 
3392  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3393  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3394  AV_TIME_BASE_Q), 0, 0);
3395 }
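/* Worked example (annotation, values assumed for illustration): a chapter with
 * time_base 1/1000 and start = 90000 begins at 90 s, so
 * av_rescale_q(90000, (AVRational){1, 1000}, AV_TIME_BASE_Q) = 90000000,
 * the value passed to stream_seek() in AV_TIME_BASE units. */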
3396 
3397 /* handle an event sent by the GUI */
3398 static void event_loop(VideoState *cur_stream)
3399 {
3400  SDL_Event event;
3401  double incr, pos, frac;
3402 
3403  for (;;) {
3404  double x;
3405  refresh_loop_wait_event(cur_stream, &event);
3406  switch (event.type) {
3407  case SDL_KEYDOWN:
3408  if (exit_on_keydown) {
3409  do_exit(cur_stream);
3410  break;
3411  }
3412  switch (event.key.keysym.sym) {
3413  case SDLK_ESCAPE:
3414  case SDLK_q:
3415  do_exit(cur_stream);
3416  break;
3417  case SDLK_f:
3418  toggle_full_screen(cur_stream);
3419  cur_stream->force_refresh = 1;
3420  break;
3421  case SDLK_p:
3422  case SDLK_SPACE:
3423  toggle_pause(cur_stream);
3424  break;
3425  case SDLK_s: // S: Step to next frame
3426  step_to_next_frame(cur_stream);
3427  break;
3428  case SDLK_a:
3429  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3430  break;
3431  case SDLK_v:
3432  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3433  break;
3434  case SDLK_c:
3435  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3436  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3437  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3438  break;
3439  case SDLK_t:
3440  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3441  break;
3442  case SDLK_w:
3443 #if CONFIG_AVFILTER
3444  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3445  if (++cur_stream->vfilter_idx >= nb_vfilters)
3446  cur_stream->vfilter_idx = 0;
3447  } else {
3448  cur_stream->vfilter_idx = 0;
3449  toggle_audio_display(cur_stream);
3450  }
3451 #else
3452  toggle_audio_display(cur_stream);
3453 #endif
3454  break;
3455  case SDLK_PAGEUP:
3456  if (cur_stream->ic->nb_chapters <= 1) {
3457  incr = 600.0;
3458  goto do_seek;
3459  }
3460  seek_chapter(cur_stream, 1);
3461  break;
3462  case SDLK_PAGEDOWN:
3463  if (cur_stream->ic->nb_chapters <= 1) {
3464  incr = -600.0;
3465  goto do_seek;
3466  }
3467  seek_chapter(cur_stream, -1);
3468  break;
3469  case SDLK_LEFT:
3470  incr = -10.0;
3471  goto do_seek;
3472  case SDLK_RIGHT:
3473  incr = 10.0;
3474  goto do_seek;
3475  case SDLK_UP:
3476  incr = 60.0;
3477  goto do_seek;
3478  case SDLK_DOWN:
3479  incr = -60.0;
3480  do_seek:
3481  if (seek_by_bytes) {
3482  pos = -1;
3483  if (pos < 0 && cur_stream->video_stream >= 0)
3484  pos = frame_queue_last_pos(&cur_stream->pictq);
3485  if (pos < 0 && cur_stream->audio_stream >= 0)
3486  pos = frame_queue_last_pos(&cur_stream->sampq);
3487  if (pos < 0)
3488  pos = avio_tell(cur_stream->ic->pb);
3489  if (cur_stream->ic->bit_rate)
3490  incr *= cur_stream->ic->bit_rate / 8.0;
3491  else
3492  incr *= 180000.0;
3493  pos += incr;
3494  stream_seek(cur_stream, pos, incr, 1);
3495  } else {
3496  pos = get_master_clock(cur_stream);
3497  if (isnan(pos))
3498  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3499  pos += incr;
3500  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3501  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3502  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3503  }
3504  break;
3505  default:
3506  break;
3507  }
3508  break;
3509  case SDL_VIDEOEXPOSE:
3510  cur_stream->force_refresh = 1;
3511  break;
3512  case SDL_MOUSEBUTTONDOWN:
3513  if (exit_on_mousedown) {
3514  do_exit(cur_stream);
3515  break;
3516  }
3517  case SDL_MOUSEMOTION:
3518  if (cursor_hidden) {
3519  SDL_ShowCursor(1);
3520  cursor_hidden = 0;
3521  }
3522  cursor_last_shown = av_gettime_relative();
3523  if (event.type == SDL_MOUSEBUTTONDOWN) {
3524  x = event.button.x;
3525  } else {
3526  if (event.motion.state != SDL_PRESSED)
3527  break;
3528  x = event.motion.x;
3529  }
3530  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3531  uint64_t size = avio_size(cur_stream->ic->pb);
3532  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3533  } else {
3534  int64_t ts;
3535  int ns, hh, mm, ss;
3536  int tns, thh, tmm, tss;
3537  tns = cur_stream->ic->duration / 1000000LL;
3538  thh = tns / 3600;
3539  tmm = (tns % 3600) / 60;
3540  tss = (tns % 60);
3541  frac = x / cur_stream->width;
3542  ns = frac * tns;
3543  hh = ns / 3600;
3544  mm = (ns % 3600) / 60;
3545  ss = (ns % 60);
3546  av_log(NULL, AV_LOG_INFO,
3547  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3548  hh, mm, ss, thh, tmm, tss);
3549  ts = frac * cur_stream->ic->duration;
3550  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3551  ts += cur_stream->ic->start_time;
3552  stream_seek(cur_stream, ts, 0, 0);
3553  }
3554  break;
3555  case SDL_VIDEORESIZE:
3556  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3557  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3558  if (!screen) {
3559  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3560  do_exit(cur_stream);
3561  }
3562  screen_width = cur_stream->width = screen->w;
3563  screen_height = cur_stream->height = screen->h;
3564  cur_stream->force_refresh = 1;
3565  break;
3566  case SDL_QUIT:
3567  case FF_QUIT_EVENT:
3568  do_exit(cur_stream);
3569  break;
3570  case FF_ALLOC_EVENT:
3571  alloc_picture(event.user.data1);
3572  break;
3573  default:
3574  break;
3575  }
3576  }
3577 }
3578 
3579 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3580 {
3581  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3582  return opt_default(NULL, "video_size", arg);
3583 }
3584 
3585 static int opt_width(void *optctx, const char *opt, const char *arg)
3586 {
3587  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3588  return 0;
3589 }
3590 
3591 static int opt_height(void *optctx, const char *opt, const char *arg)
3592 {
3593  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3594  return 0;
3595 }
3596 
3597 static int opt_format(void *optctx, const char *opt, const char *arg)
3598 {
3599  file_iformat = av_find_input_format(arg);
3600  if (!file_iformat) {
3601  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3602  return AVERROR(EINVAL);
3603  }
3604  return 0;
3605 }
3606 
3607 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3608 {
3609  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3610  return opt_default(NULL, "pixel_format", arg);
3611 }
3612 
3613 static int opt_sync(void *optctx, const char *opt, const char *arg)
3614 {
3615  if (!strcmp(arg, "audio"))
3616  av_sync_type = AV_SYNC_AUDIO_MASTER;
3617  else if (!strcmp(arg, "video"))
3618  av_sync_type = AV_SYNC_VIDEO_MASTER;
3619  else if (!strcmp(arg, "ext"))
3620  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3621  else {
3622  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3623  exit(1);
3624  }
3625  return 0;
3626 }
3627 
3628 static int opt_seek(void *optctx, const char *opt, const char *arg)
3629 {
3630  start_time = parse_time_or_die(opt, arg, 1);
3631  return 0;
3632 }
3633 
3634 static int opt_duration(void *optctx, const char *opt, const char *arg)
3635 {
3636  duration = parse_time_or_die(opt, arg, 1);
3637  return 0;
3638 }
3639 
3640 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3641 {
3642  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3643  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3644  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3645  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3646  return 0;
3647 }
3648 
3649 static void opt_input_file(void *optctx, const char *filename)
3650 {
3651  if (input_filename) {
3652  av_log(NULL, AV_LOG_FATAL,
3653  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3654  filename, input_filename);
3655  exit(1);
3656  }
3657  if (!strcmp(filename, "-"))
3658  filename = "pipe:";
3659  input_filename = filename;
3660 }
3661 
3662 static int opt_codec(void *optctx, const char *opt, const char *arg)
3663 {
3664  const char *spec = strchr(opt, ':');
3665  if (!spec) {
3666  av_log(NULL, AV_LOG_ERROR,
3667  "No media specifier was specified in '%s' in option '%s'\n",
3668  arg, opt);
3669  return AVERROR(EINVAL);
3670  }
3671  spec++;
3672  switch (spec[0]) {
3673  case 'a' : audio_codec_name = arg; break;
3674  case 's' : subtitle_codec_name = arg; break;
3675  case 'v' : video_codec_name = arg; break;
3676  default:
3677  av_log(NULL, AV_LOG_ERROR,
3678  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3679  return AVERROR(EINVAL);
3680  }
3681  return 0;
3682 }
3683 
3684 static int dummy;
3685 
3686 static const OptionDef options[] = {
3687 #include "cmdutils_common_opts.h"
3688  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3689  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3690  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3691  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3692  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3693  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3694  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3695  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3696  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3697  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3698  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3699  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3700  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3701  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3702  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3703  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3704  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3705  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3706  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3707  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3708  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3709  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3710  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3711  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3712  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3713  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3714  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3715  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3716  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3717 #if CONFIG_AVFILTER
3718  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3719  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3720 #endif
3721  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3722  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3723  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3724  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3725  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3726  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3727  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3728  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3729  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3730  { NULL, },
3731 };
3732 
3733 static void show_usage(void)
3734 {
3735  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3736  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3737  av_log(NULL, AV_LOG_INFO, "\n");
3738 }
3739 
3740 void show_help_default(const char *opt, const char *arg)
3741 {
3742  av_log_set_callback(log_callback_help);
3743  show_usage();
3744  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3745  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3746  printf("\n");
3747  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3748  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3749 #if !CONFIG_AVFILTER
3750  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3751 #else
3752  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3753 #endif
3754  printf("\nWhile playing:\n"
3755  "q, ESC quit\n"
3756  "f toggle full screen\n"
3757  "p, SPC pause\n"
3758  "a cycle audio channel in the current program\n"
3759  "v cycle video channel\n"
3760  "t cycle subtitle channel in the current program\n"
3761  "c cycle program\n"
3762  "w cycle video filters or show modes\n"
3763  "s activate frame-step mode\n"
3764  "left/right seek backward/forward 10 seconds\n"
3765  "down/up seek backward/forward 1 minute\n"
3766  "page down/page up seek backward/forward 10 minutes\n"
3767  "mouse click seek to percentage in file corresponding to fraction of width\n"
3768  );
3769 }
3770 
3771 static int lockmgr(void **mtx, enum AVLockOp op)
3772 {
3773  switch(op) {
3774  case AV_LOCK_CREATE:
3775  *mtx = SDL_CreateMutex();
3776  if(!*mtx)
3777  return 1;
3778  return 0;
3779  case AV_LOCK_OBTAIN:
3780  return !!SDL_LockMutex(*mtx);
3781  case AV_LOCK_RELEASE:
3782  return !!SDL_UnlockMutex(*mtx);
3783  case AV_LOCK_DESTROY:
3784  SDL_DestroyMutex(*mtx);
3785  return 0;
3786  }
3787  return 1;
3788 }
3789 
3790 /* Called from the main */
3791 int main(int argc, char **argv)
3792 {
3793  int flags;
3794  VideoState *is;
3795  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3796 
3797  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3798  parse_loglevel(argc, argv, options);
3799 
3800  /* register all codecs, demux and protocols */
3801 #if CONFIG_AVDEVICE
3802  avdevice_register_all();
3803 #endif
3804 #if CONFIG_AVFILTER
3805  avfilter_register_all();
3806 #endif
3807  av_register_all();
3808  avformat_network_init();
3809 
3810  init_opts();
3811 
3812  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3813  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3814 
3815  show_banner(argc, argv, options);
3816 
3817  parse_options(NULL, argc, argv, options, opt_input_file);
3818 
3819  if (!input_filename) {
3820  show_usage();
3821  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3822  av_log(NULL, AV_LOG_FATAL,
3823  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3824  exit(1);
3825  }
3826 
3827  if (display_disable) {
3828  video_disable = 1;
3829  }
3830  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3831  if (audio_disable)
3832  flags &= ~SDL_INIT_AUDIO;
3833  if (display_disable)
3834  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3835 #if !defined(_WIN32) && !defined(__APPLE__)
3836  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3837 #endif
3838  if (SDL_Init (flags)) {
3839  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3840  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3841  exit(1);
3842  }
3843 
3844  if (!display_disable) {
3845  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3846  fs_screen_width = vi->current_w;
3847  fs_screen_height = vi->current_h;
3848  }
3849 
3850  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3851  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3852  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3853 
3854  if (av_lockmgr_register(lockmgr)) {
3855  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3856  do_exit(NULL);
3857  }
3858 
3859  av_init_packet(&flush_pkt);
3860  flush_pkt.data = (uint8_t *)&flush_pkt;
3861 
3862  is = stream_open(input_filename, file_iformat);
3863  if (!is) {
3864  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3865  do_exit(NULL);
3866  }
3867 
3868  event_loop(is);
3869 
3870  /* never returns */
3871 
3872  return 0;
3873 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1471
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:91
AVFilterContext ** filters
Definition: avfilter.h:1173
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:477
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3640
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:722
static void video_image_display(VideoState *is)
Definition: ffplay.c:1097
#define NULL
Definition: coverity.c:32
Clock audclk
Definition: ffplay.c:214
const char const char void * val
Definition: avisynth_c.h:634
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:478
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:453
float v
const char * s
Definition: avisynth_c.h:631
int width
Definition: ffplay.c:285
#define OPT_EXPERT
Definition: cmdutils.h:163
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:765
static double get_clock(Clock *c)
Definition: ffplay.c:1394
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:281
enum AVSampleFormat fmt
Definition: ffplay.c:132
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3591
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:66
SDL_cond * cond
Definition: ffplay.c:172
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3067
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2582
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3454
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
FrameQueue pictq
Definition: ffplay.c:218
static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
Definition: ffplay.c:1343
Decoder auddec
Definition: ffplay.c:222
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:89
AVStream * subtitle_st
Definition: ffplay.c:268
This structure describes decoded (raw) audio or video data.
Definition: frame.h:171
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:123
static double rint(double x)
Definition: libm.h:141
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3482
#define SWS_BICUBIC
Definition: swscale.h:58
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1520
double rdftspeed
Definition: ffplay.c:336
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
double frame_timer
Definition: ffplay.c:271
static AVInputFormat * file_iformat
Definition: ffplay.c:303
#define OPT_VIDEO
Definition: cmdutils.h:165
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3649
double get_rotation(AVStream *st)
Definition: cmdutils.c:2090
int av_lockmgr_register(int(*cb)(void **mutex, enum AVLockOp op))
Register a user provided lock manager supporting the operations specified by AVLockOp.
Definition: utils.c:3588
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3597
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:76
Unlock the mutex.
Definition: avcodec.h:5241
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:248
AVRational next_pts_tb
Definition: ffplay.c:194
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1442
Main libavfilter public API header.
int rindex
Definition: ffplay.c:165
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:81
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:402
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
const char * g
Definition: vf_curves.c:108
static int default_height
Definition: ffplay.c:309
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:273
FrameQueue sampq
Definition: ffplay.c:220
enum VideoState::ShowMode show_mode
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:184
int seek_flags
Definition: ffplay.c:207
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1362
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:748
int serial
Definition: ffplay.c:118
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4188
static int64_t cur_time
Definition: ffserver.c:253
#define OPT_AUDIO
Definition: cmdutils.h:166
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3246
int num
numerator
Definition: rational.h:44
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3607
int nb_colors
number of colors in pict, undefined when pict is not set
Definition: avcodec.h:3486
int size
Definition: avcodec.h:1163
const char * b
Definition: vf_curves.c:109
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1506
MyAVPacketList * first_pkt
Definition: ffplay.c:114
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1492
static int seek_by_bytes
Definition: ffplay.c:316
double audio_diff_cum
Definition: ffplay.c:232
static void packet_queue_init(PacketQueue *q)
Definition: ffplay.c:445
Various defines for YUV<->RGB conversion.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1623
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:97
AVInputFormat * iformat
Definition: ffplay.c:200
enum AVMediaType codec_type
Definition: rtp.c:37
AVCodecContext * avctx
Definition: ffplay.c:186
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1130
int paused
Definition: ffplay.c:203
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3662
static AVStream * video_stream
int abort_request
Definition: ffplay.c:117
unsigned num_rects
Definition: avcodec.h:3511
#define a1
Definition: regdef.h:47
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1406
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1519
SDL_Rect last_display_rect
Definition: ffplay.c:281
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
double audio_diff_threshold
Definition: ffplay.c:234
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:479
#define FF_ARRAY_ELEMS(a)
uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE]
Definition: ffplay.c:239
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
discard all
Definition: avcodec.h:669
int64_t channel_layout
Definition: ffplay.c:131
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:93
static int audio_disable
Definition: ffplay.c:312
AVStream * audio_st
Definition: ffplay.c:236
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2730
static const char * audio_codec_name
Definition: ffplay.c:333
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
Picture data structure.
Definition: avcodec.h:3452
int serial
Definition: ffplay.c:151
AVCodec.
Definition: avcodec.h:3181
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3370
double pts_drift
Definition: ffplay.c:139
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1994
AVLockOp
Lock operation used by lockmgr.
Definition: avcodec.h:5238
int width
Definition: ffplay.c:158
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:1178
AVStream * video_st
Definition: ffplay.c:275
Clock extclk
Definition: ffplay.c:216
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3206
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1369
void * opaque
Definition: avio.h:52
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:279
AVSubtitleRect ** rects
Definition: avcodec.h:3512
void av_picture_copy(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
Copy image src to dst.
Definition: avpicture.c:72
Format I/O context.
Definition: avformat.h:1272
static int64_t sws_flags
Definition: ffplay.c:105
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3337
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:4211
Definition: ffplay.c:148
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:256
int av_sync_type
Definition: ffplay.c:228
unsigned int nb_stream_indexes
Definition: avformat.h:1210
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:170
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3484
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
double pts
Definition: ffplay.c:152
static AVFilter ** last_filter
Definition: avfilter.c:482
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:233
AVRational start_pts_tb
Definition: ffplay.c:192
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:96
static int read_thread(void *arg)
Definition: ffplay.c:2876
int keep_last
Definition: ffplay.c:169
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:131
int rdft_bits
Definition: ffplay.c:262
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:869
int size
Definition: ffplay.c:116
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:675
static int64_t start_time
Definition: ffplay.c:320
if()
Definition: avfilter.c:975
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1993
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:95
Lock the mutex.
Definition: avcodec.h:5240
uint8_t
static int nb_streams
Definition: ffprobe.c:226
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:533
static int default_width
Definition: ffplay.c:308
int last_video_stream
Definition: ffplay.c:297
int last_subtitle_stream
Definition: ffplay.c:297
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:642
#define HAS_ARG
Definition: cmdutils.h:161
int audio_hw_buf_size
Definition: ffplay.c:238
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:2795
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3453
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:204
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2853
struct SwrContext * swr_ctx
Definition: ffplay.c:251
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
int finished
Definition: ffplay.c:188
libavcodec/libavfilter gluing utilities
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3398
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:367
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:4232
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:257
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:470
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1384
static int framedrop
Definition: ffplay.c:330
static void alloc_picture(VideoState *is)
Definition: ffplay.c:1737
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:74
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1340
AVPacket pkt
Definition: ffplay.c:108
int bytes_per_sec
Definition: ffplay.c:134
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:789
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:107
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
void av_codec_set_lowres(AVCodecContext *avctx, int val)
static int64_t audio_callback_time
Definition: ffplay.c:348
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:378
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1383
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:497
static void sigterm_handler(int sig)
Definition: ffplay.c:1330
uint8_t * data
Definition: avcodec.h:1162
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:371
int freq
Definition: ffplay.c:129
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4123
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:164
Definition: mxfdec.c:264
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:137
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:130
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:494
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:780
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:83
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3485
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:365
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:164
static int64_t duration
Definition: ffplay.c:321
AVRational sar
Definition: ffplay.c:160
AVPacket pkt_temp
Definition: ffplay.c:184
unsigned int * stream_index
Definition: avformat.h:1209
#define av_log(a,...)
static void duplicate_right_border_pixels(SDL_Overlay *bmp)
Definition: ffplay.c:1768
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:285
PacketQueue videoq
Definition: ffplay.c:276
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2051
AVDictionary * format_opts
Definition: cmdutils.c:68
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:301
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:101
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:477
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:3496
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2843
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3195
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:3479
int audio_diff_avg_count
Definition: ffplay.c:235
const AVS_VideoInfo * vi
Definition: avisynth_c.h:658
int ytop
Definition: ffplay.c:285
int width
width and height of the video frame
Definition: frame.h:220
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1482
int seek_req
Definition: ffplay.c:206
int(* callback)(void *)
Definition: avio.h:51
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2352
Create a mutex.
Definition: avcodec.h:5239
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:126
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1582
int read_pause_return
Definition: ffplay.c:210
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:474
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:302
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3483
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:732
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:69
RDFTContext * rdft
Definition: ffplay.c:261
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:756
const char * r
Definition: vf_curves.c:107
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:434
static int autorotate
Definition: ffplay.c:344
int capabilities
Codec capabilities.
Definition: avcodec.h:3200
#define RGBA_IN(r, g, b, a, s)
Definition: ffplay.c:832
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:103
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:3562
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1533
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:199
const char * arg
Definition: jacosubdec.c:66
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1335
int reallocate
Definition: ffplay.c:157
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:491
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:388
AVChapter ** chapters
Definition: avformat.h:1472
#define wrap(func)
Definition: neontest.h:62
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:342
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1292
int video_stream
Definition: ffplay.c:274
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
int * queue_serial
Definition: ffplay.c:144
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1426
int xpos
Definition: ffplay.c:264
int channels
Definition: ffplay.c:130
static enum ShowMode show_mode
Definition: ffplay.c:332
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1205
#define FFMAX(a, b)
Definition: common.h:64
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:488
static const OptionDef options[]
Definition: ffplay.c:3686
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3684
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define BPP
Definition: ffplay.c:856
double audio_clock
Definition: ffplay.c:230
static const int sample_rates[]
Definition: dcaenc.h:32
int force_refresh
Definition: ffplay.c:202
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2046
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3613
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2062
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2371
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3510
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:628
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3513
static int genpts
Definition: ffplay.c:323
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:861
static AVPacket flush_pkt
Definition: ffplay.c:350
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:427
double frame_last_returned_time
Definition: ffplay.c:272
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:487
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
static const char * subtitle_codec_name
Definition: ffplay.c:334
static int subtitle_disable
Definition: ffplay.c:314
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:127
int max_size
Definition: ffplay.c:168
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1328
int step
Definition: ffplay.c:286
SDL_Thread * decoder_tid
Definition: ffplay.c:195
static SDL_Surface * screen
Definition: ffplay.c:355
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:65
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:3571
SDL_mutex * mutex
Definition: ffplay.c:119
int audio_write_buf_size
Definition: ffplay.c:245
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:160
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:125
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:124
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:242
struct MyAVPacketList * next
Definition: ffplay.c:109
#define AV_CH_LAYOUT_STEREO_DOWNMIX
static double lum(void *priv, double x, double y, int plane)
Definition: vf_fftfilt.c:74
char filename[1024]
input or output filename
Definition: avformat.h:1348
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3492
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:247
#define FFMIN(a, b)
Definition: common.h:66
SDL_mutex * mutex
Definition: ffplay.c:171
float y
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:397
int windex
Definition: ffplay.c:166
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:602
static int cursor_hidden
Definition: ffplay.c:338
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:534
ret
Definition: avfilter.c:974
AVSubtitle sub
Definition: ffplay.c:150
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3771
int width
picture width / height.
Definition: avcodec.h:1414
int main(int argc, char **argv)
Definition: ffplay.c:3791
int height
Definition: ffplay.c:159
static void show_usage(void)
Definition: ffplay.c:3733
int nb_packets
Definition: ffplay.c:115
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3585
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1881
int frame_drops_late
Definition: ffplay.c:253
struct AudioParams audio_src
Definition: ffplay.c:246
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3353
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1420
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:322
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:1984
int last_i_start
Definition: ffplay.c:260
uint16_t format
Definition: avcodec.h:3508
char filename[1024]
Definition: ffplay.c:284
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2498
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
Definition: ffplay.c:858
#define OPT_INT64
Definition: cmdutils.h:170
MyAVPacketList * last_pkt
Definition: ffplay.c:114
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1525
float u
int n
Definition: avisynth_c.h:547
static int frame_queue_prev(FrameQueue *f)
Definition: ffplay.c:748
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2439
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:347
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:79
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:794
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
static int decoder_reorder_pts
Definition: ffplay.c:325
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:94
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1414
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:258
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1356
int paused
Definition: ffplay.c:143
static const char * input_filename
Definition: ffplay.c:304
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:819
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:685
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3740
int av_codec_get_max_lowres(const AVCodec *codec)
Definition: utils.c:1284
int64_t pos
Definition: ffplay.c:154
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:297
Stream structure.
Definition: avformat.h:842
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3229
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1176
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1351
static int fs_screen_width
Definition: ffplay.c:306
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:86
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:232
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
Definition: opt.c:823
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4136
static int screen_height
Definition: ffplay.c:311
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3634
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:214
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int64_t next_pts
Definition: ffplay.c:193
static int autoexit
Definition: ffplay.c:326
AVFrame * frame
Definition: ffplay.c:149
int serial
Definition: ffplay.c:142
enum AVMediaType codec_type
Definition: avcodec.h:1249
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:720
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:1066
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:59
enum AVCodecID codec_id
Definition: avcodec.h:1258
static void do_exit(VideoState *is)
Definition: ffplay.c:1312
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:253
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:265
int sample_rate
samples per second
Definition: avcodec.h:1985
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:199
AVIOContext * pb
I/O context.
Definition: avformat.h:1314
static int loop
Definition: ffplay.c:329
int last_paused
Definition: ffplay.c:204
static int exit_on_keydown
Definition: ffplay.c:327
FFT functions.
main external API structure.
Definition: avcodec.h:1241
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:2951
Decoder subdec
Definition: ffplay.c:224
int av_copy_packet(AVPacket *dst, const AVPacket *src)
Copy packet, including contents.
Definition: avpacket.c:265
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:139
double max_frame_duration
Definition: ffplay.c:277
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2824
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:252
Clock vidclk
Definition: ffplay.c:215
int x
Definition: f_ebur128.c:90
static void decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2210
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:912
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:479
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1563
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:305
double pts
Definition: ffplay.c:138
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:680
static int audio_thread(void *arg)
Definition: ffplay.c:2123
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
static int av_sync_type
Definition: ffplay.c:319
int pkt_serial
Definition: ffplay.c:187
BYTE int const BYTE int int int height
Definition: avisynth_c.h:676
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:668
int sample_rate
Sample rate of the audio data.
Definition: frame.h:422
int configure_filtergraph(FilterGraph *fg)
static void free_picture(Frame *vp)
Definition: ffplay.c:1058
int av_frame_get_channels(const AVFrame *frame)
Definition: f_ebur128.c:90
static const AVFilterPad inputs[]
Definition: af_ashowinfo.c:239
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1359
PacketQueue audioq
Definition: ffplay.c:237
int packet_pending
Definition: ffplay.c:189
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:117
int64_t seek_pos
Definition: ffplay.c:208
rational number numerator/denominator
Definition: rational.h:43
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:77
int allocated
Definition: ffplay.c:156
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:286
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:152
#define OPT_STRING
Definition: cmdutils.h:164
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1148
SDL_cond * cond
Definition: ffplay.c:120
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:90
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2267
struct SwsContext * sws_opts
Definition: cmdutils.c:66
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:542
AVMediaType
Definition: avutil.h:192
discard useless packets like 0 size packets in avi
Definition: avcodec.h:664
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2859
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1477
int queue_attachments_req
Definition: ffplay.c:205
unsigned nb_filters
Definition: avfilter.h:1175
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1330
#define snprintf
Definition: snprintf.h:34
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:632
int error
contains the error code or 0 if no error happened
Definition: avio.h:145
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:686
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:190
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
#define FF_ALLOC_EVENT
Definition: ffplay.c:352
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1475
int audio_stream
Definition: ffplay.c:226
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2162
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:262
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:131
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2656
char * name
unique name for this input/output in the list
Definition: avfilter.h:1353
static int64_t cursor_last_shown
Definition: ffplay.c:337
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:638
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3579
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:91
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:462
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:72
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: utils.c:2956
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1786
static int flags
Definition: cpu.c:47
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1357
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:182
int frame_drops_early
Definition: ffplay.c:252
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:104
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2391
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:95
int sample_array_index
Definition: ffplay.c:259
SDL_cond * continue_read_thread
Definition: ffplay.c:299
int64_t start
Definition: avformat.h:1238
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:655
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:762
#define OPT_BOOL
Definition: cmdutils.h:162
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:269
double speed
Definition: ffplay.c:141
static int exit_on_mousedown
Definition: ffplay.c:328
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78
#define CODEC_FLAG_EMU_EDGE
Definition: avcodec.h:744
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1032
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:513
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:63
#define YUVA_OUT(d, y, u, v, a)
Definition: ffplay.c:850
static int video_thread(void *arg)
Definition: ffplay.c:2216
#define OPT_INT
Definition: cmdutils.h:167
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:179
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1335
AVDictionary * codec_opts
Definition: cmdutils.c:68
struct AudioParams audio_tgt
Definition: ffplay.c:250
#define ALPHA_BLEND(a, oldp, newp, s)
Definition: ffplay.c:829
AVRational av_codec_get_pkt_timebase(const AVCodecContext *avctx)
Free mutex resources.
Definition: avcodec.h:5242
uint8_t * audio_buf
Definition: ffplay.c:240
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:73
static int display_disable
Definition: ffplay.c:317
static int video_disable
Definition: ffplay.c:313
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3024
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:894
signed 16 bits
Definition: samplefmt.h:62
int audio_buf_index
Definition: ffplay.c:244
uint8_t * audio_buf1
Definition: ffplay.c:241
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3628
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:143
static int screen_width
Definition: ffplay.c:310
PacketQueue * pktq
Definition: ffplay.c:173
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:905
uint32_t start_display_time
Definition: avcodec.h:3509
FFTSample * rdft_data
Definition: ffplay.c:263
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1575
int audio_clock_serial
Definition: ffplay.c:231
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1237
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
char * key
Definition: dict.h:87
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:82
PacketQueue subtitleq
Definition: ffplay.c:269
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1284
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3644
static int lowres
Definition: ffplay.c:324
int eof
Definition: ffplay.c:282
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:87
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:579
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:331
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:480
double duration
Definition: ffplay.c:153
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:88
int eof_reached
true if eof reached
Definition: avio.h:139
#define NAN
Definition: math.h:28
int len
int channels
number of audio channels
Definition: avcodec.h:1986
#define av_log2
Definition: intmath.h:105
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
unsigned int audio_buf1_size
Definition: ffplay.c:243
SDL_Thread * read_tid
Definition: ffplay.c:199
AVPacket pkt
Definition: ffplay.c:183
int frame_size
Definition: ffplay.c:133
void av_log_set_flags(int arg)
Definition: log.c:387
int64_t start_pts
Definition: ffplay.c:191
int abort_request
Definition: ffplay.c:201
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:771
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:416
double last_updated
Definition: ffplay.c:140
Decoder viddec
Definition: ffplay.c:223
AVDictionary * swr_opts
Definition: cmdutils.c:67
int height
Definition: ffplay.c:285
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:193
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1342
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:527
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
An instance of a filter.
Definition: avfilter.h:633
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1161
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1374
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1367
int height
Definition: frame.h:220
static const char * video_codec_name
Definition: ffplay.c:335
#define MAX_QUEUE_SIZE
Definition: ffplay.c:68
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3066
PacketQueue * queue
Definition: ffplay.c:185
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:628
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:690
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
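A common iteration idiom, sketched here for illustration: searching for the empty key with AV_DICT_IGNORE_SUFFIX matches every entry, so the loop walks the whole dictionary.

#include "libavutil/dict.h"
#include "libavutil/log.h"

/* Print every key=value pair of a metadata dictionary. */
static void dump_metadata(const AVDictionary *m)
{
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(m, "", t, AV_DICT_IGNORE_SUFFIX)))
        av_log(NULL, AV_LOG_INFO, "%s=%s\n", t->key, t->value);
}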
static int subtitle_thread(void *arg)
Definition: ffplay.c:2325
FrameQueue subpq
Definition: ffplay.c:219
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1459
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:167
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:300
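Illustrative only: avio_feof() is used exactly like feof(), i.e. it becomes nonzero only after a read attempt has gone past the end of the stream.

#include "libavformat/avio.h"

/* Sum all bytes of an already opened AVIOContext. The read that hits EOF
 * returns 0 and raises the eof flag, so it does not change the sum. */
static unsigned checksum_stream(AVIOContext *pb)
{
    unsigned sum = 0;
    while (!avio_feof(pb))
        sum += avio_r8(pb);
    return sum;
}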
#define FF_QUIT_EVENT
Definition: ffplay.c:353
int xleft
Definition: ffplay.c:285
#define FFSWAP(type, a, b)
Definition: common.h:69
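FFSWAP(type, a, b) is a temporary-variable swap macro; a minimal usage sketch:

#include "libavutil/common.h"

/* Ensure *lo <= *hi, swapping the two ints in place when needed. */
static void order_pair(int *lo, int *hi)
{
    if (*lo > *hi)
        FFSWAP(int, *lo, *hi);
}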
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2011
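Small illustration: av_get_pix_fmt_name() returns NULL for an unknown format, so a fallback string keeps the log readable.

#include "libavutil/pixdesc.h"
#include "libavutil/log.h"

/* Log a pixel format by its short name, or "none" when it is unknown. */
static void log_pix_fmt(enum AVPixelFormat fmt)
{
    const char *name = av_get_pix_fmt_name(fmt);
    av_log(NULL, AV_LOG_INFO, "pixel format: %s\n", name ? name : "none");
}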
int stream_index
Definition: avcodec.h:1164
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:884
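Timestamps such as the pts and dts fields referenced on this page are integers counted in this time_base; converting one to seconds is a single multiplication by the rational (sketch, not from the listing):

#include <math.h>
#include "libavformat/avformat.h"

/* Convert a packet timestamp to seconds using the owning stream's time_base;
 * returns NAN when the timestamp is unset. */
static double pkt_time_in_seconds(const AVPacket *pkt, const AVStream *st)
{
    if (pkt->pts == AV_NOPTS_VALUE)
        return NAN;
    return pkt->pts * av_q2d(st->time_base);
}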
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:97
int subtitle_stream
Definition: ffplay.c:267
unsigned int audio_buf_size
Definition: ffplay.c:242
int64_t seek_rel
Definition: ffplay.c:209
int realtime
Definition: ffplay.c:212
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:215
#define YUVA_IN(y, u, v, a, s, pal)
Definition: ffplay.c:841
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:907
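A typical use of the discard field, sketched here: mark every stream the player does not intend to decode so the demuxer can drop those packets early.

#include "libavformat/avformat.h"

/* Keep packets only for the wanted stream index; discard everything else. */
static void keep_only_stream(AVFormatContext *ic, int wanted_index)
{
    unsigned i;
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = (int)i == wanted_index ? AVDISCARD_DEFAULT
                                                         : AVDISCARD_ALL;
}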
static void video_display(VideoState *is)
Definition: ffplay.c:1384
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:315
SDL_Overlay * bmp
Definition: ffplay.c:155
static int show_status
Definition: ffplay.c:318
static int compute_mod(int a, int b)
Definition: ffplay.c:1143
AVPixelFormat
Pixel format.
Definition: pixfmt.h:61
This structure stores compressed data.
Definition: avcodec.h:1139
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
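Illustrative one-time startup sequence for a player of this library era; the extra avdevice and network calls are optional and only assumed here:

#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"

/* Global initialization before the first avformat_open_input() call. */
static void init_libav(void)
{
    av_register_all();        /* register muxers, demuxers and protocols */
    avdevice_register_all();  /* capture devices, if libavdevice is built in */
    avformat_network_init();  /* required for network protocols such as http */
}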
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:369
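av_opt_set() writes a named AVOption from a string; a small hedged example enabling automatic decoder threading (the "threads" option of AVCodecContext accepts the named constant "auto"):

#include "libavcodec/avcodec.h"
#include "libavutil/opt.h"

/* Set the decoder's "threads" option by name; 0 as search_flags limits the
 * lookup to the object itself. Returns 0 on success or a negative AVERROR. */
static int enable_auto_threads(AVCodecContext *avctx)
{
    return av_opt_set(avctx, "threads", "auto", 0);
}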
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2545
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:225
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
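av_mallocz() pairs naturally with av_freep() (also referenced on this page); a minimal allocation sketch around a hypothetical struct:

#include "libavutil/mem.h"

typedef struct Widget {   /* hypothetical struct, for illustration only */
    int id;
    double score;
} Widget;

/* Allocate a zero-initialized Widget; every field starts out as 0 / 0.0. */
static Widget *widget_alloc(void)
{
    return av_mallocz(sizeof(Widget));
}

/* Free the Widget and reset the caller's pointer to NULL. */
static void widget_free(Widget **w)
{
    av_freep(w);
}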
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1434
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3325
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1155
static int fs_screen_height
Definition: ffplay.c:307
double last_vis_time
Definition: ffplay.c:265
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
Definition: avformat.h:934
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
#define av_unused
Definition: attributes.h:118
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:154
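A hedged sketch of the usual sequence: set the user parameters first (here via swr_alloc_set_opts()) and only then call swr_init(), checking its return value; the sample rates and formats below are placeholders.

#include "libswresample/swresample.h"
#include "libavutil/channel_layout.h"
#include "libavutil/samplefmt.h"

/* Build a resampler converting 48 kHz stereo float to 44.1 kHz stereo s16. */
static struct SwrContext *make_resampler(void)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,  /* output */
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLT, 48000,  /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return NULL;
    }
    return swr;
}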
AVFormatContext * ic
Definition: ffplay.c:211
simple arithmetic expression evaluator
static int width
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:706
static int16_t block[64]
Definition: dct-test.c:110