ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control */
77 #define SDL_VOLUME_STEP (SDL_MIX_MAXVOLUME / 50)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* poll for a possible required screen refresh at least this often; should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 static unsigned sws_flags = SWS_BICUBIC;
109 
110 typedef struct MyAVPacketList {
111  AVPacket pkt;
112  struct MyAVPacketList *next;
113  int serial;
114 } MyAVPacketList;
115 
116 typedef struct PacketQueue {
117  MyAVPacketList *first_pkt, *last_pkt;
118  int nb_packets;
119  int size;
120  int abort_request;
121  int serial;
122  SDL_mutex *mutex;
123  SDL_cond *cond;
124 } PacketQueue;
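/* Each time flush_pkt is queued (packet_queue_start() does this, and it is also
 * how seeks reset the queue) the queue's serial is incremented, and every packet
 * remembers the serial that was current when it was queued.  Decoders and clocks
 * compare serials against the queue so that data belonging to an obsolete,
 * pre-flush state can simply be discarded. */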
125 
126 #define VIDEO_PICTURE_QUEUE_SIZE 3
127 #define SUBPICTURE_QUEUE_SIZE 16
128 #define SAMPLE_QUEUE_SIZE 9
129 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
130 
131 typedef struct AudioParams {
132  int freq;
133  int channels;
134  int64_t channel_layout;
135  enum AVSampleFormat fmt;
136  int frame_size;
137  int bytes_per_sec;
138 } AudioParams;
139 
140 typedef struct Clock {
141  double pts; /* clock base */
142  double pts_drift; /* clock base minus time at which we updated the clock */
143  double last_updated;
144  double speed;
145  int serial; /* clock is based on a packet with this serial */
146  int paused;
147  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
148 } Clock;
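/* set_clock_at() records pts_drift = pts - last_updated, so for an unpaused clock
 * get_clock() can return pts_drift + now and the clock keeps running between
 * updates without being touched.  A speed other than 1.0 makes it advance faster
 * or slower than real time, and queue_serial makes get_clock() return NAN when the
 * clock still refers to packets from before a queue flush. */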
149 
150 /* Common struct for handling all types of decoded data and allocated render buffers. */
151 typedef struct Frame {
152  AVFrame *frame;
153  AVSubtitle sub;
154  AVSubtitleRect **subrects; /* rescaled subtitle rectangles in yuva */
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  SDL_Overlay *bmp;
160  int allocated;
161  int reallocate;
162  int width;
163  int height;
164  AVRational sar;
165 } Frame;
166 
167 typedef struct FrameQueue {
168  Frame queue[FRAME_QUEUE_SIZE];
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
173  int keep_last;
174  int rindex_shown;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
177  PacketQueue *pktq;
178 } FrameQueue;
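/* The frame queue is a fixed-size ring buffer protected by mutex/cond: the decoder
 * writes at windex, the display reads at rindex.  With keep_last set, the most
 * recently shown frame stays in the queue (rindex_shown == 1) so it can be peeked
 * again for redisplay, which is why frame_queue_nb_remaining() reports
 * size - rindex_shown undisplayed frames. */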
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
182  AV_SYNC_VIDEO_MASTER,
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
187  AVPacket pkt;
188  AVPacket pkt_temp;
189  PacketQueue *queue;
190  AVCodecContext *avctx;
191  int pkt_serial;
192  int finished;
193  int packet_pending;
194  SDL_cond *empty_queue_cond;
195  int64_t start_pts;
196  AVRational start_pts_tb;
197  int64_t next_pts;
198  AVRational next_pts_tb;
199  SDL_Thread *decoder_tid;
200 } Decoder;
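/* A Decoder ties one AVCodecContext to its PacketQueue.  pkt is the packet being
 * consumed and pkt_temp the not-yet-decoded remainder of it; packet_pending stays
 * set until pkt_temp is used up, which is how a single packet can feed several
 * decode calls.  next_pts/next_pts_tb predict the pts of the next audio frame when
 * the decoder does not provide one. */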
201 
202 typedef struct VideoState {
203  SDL_Thread *read_tid;
207  int paused;
210  int seek_req;
212  int64_t seek_pos;
213  int64_t seek_rel;
216  int realtime;
217 
221 
225 
229 
232 
234 
236 
237  double audio_clock;
239  double audio_diff_cum; /* used for AV difference average computation */
249  unsigned int audio_buf_size; /* in bytes */
250  unsigned int audio_buf1_size;
251  int audio_buf_index; /* in bytes */
254  int muted;
256 #if CONFIG_AVFILTER
257  struct AudioParams audio_filter_src;
258 #endif
263 
264  enum ShowMode {
266  } show_mode;
273  int xpos;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287 #if !CONFIG_AVFILTER
289 #endif
292  int eof;
293 
294  char *filename;
296  int step;
297 
298 #if CONFIG_AVFILTER
299  int vfilter_idx;
300  AVFilterContext *in_video_filter; // the first filter in the video chain
301  AVFilterContext *out_video_filter; // the last filter in the video chain
302  AVFilterContext *in_audio_filter; // the first filter in the audio chain
303  AVFilterContext *out_audio_filter; // the last filter in the audio chain
304  AVFilterGraph *agraph; // audio filter graph
305 #endif
306 
308 
310 } VideoState;
311 
312 /* options specified by the user */
314 static const char *input_filename;
315 static const char *window_title;
316 static int fs_screen_width;
317 static int fs_screen_height;
318 static int default_width = 640;
319 static int default_height = 480;
320 static int screen_width = 0;
321 static int screen_height = 0;
322 static int audio_disable;
323 static int video_disable;
324 static int subtitle_disable;
325 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
326 static int seek_by_bytes = -1;
327 static int display_disable;
328 static int show_status = 1;
329 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
330 static int64_t start_time = AV_NOPTS_VALUE;
331 static int64_t duration = AV_NOPTS_VALUE;
332 static int fast = 0;
333 static int genpts = 0;
334 static int lowres = 0;
335 static int decoder_reorder_pts = -1;
336 static int autoexit;
337 static int exit_on_keydown;
338 static int exit_on_mousedown;
339 static int loop = 1;
340 static int framedrop = -1;
341 static int infinite_buffer = -1;
342 static enum ShowMode show_mode = SHOW_MODE_NONE;
343 static const char *audio_codec_name;
344 static const char *subtitle_codec_name;
345 static const char *video_codec_name;
346 double rdftspeed = 0.02;
347 static int64_t cursor_last_shown;
348 static int cursor_hidden = 0;
349 #if CONFIG_AVFILTER
350 static const char **vfilters_list = NULL;
351 static int nb_vfilters = 0;
352 static char *afilters = NULL;
353 #endif
354 static int autorotate = 1;
355 
356 /* current context */
357 static int is_full_screen;
358 static int64_t audio_callback_time;
359 
360 static AVPacket flush_pkt;
361 
362 #define FF_ALLOC_EVENT (SDL_USEREVENT)
363 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
364 
365 static SDL_Surface *screen;
366 
367 #if CONFIG_AVFILTER
368 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
369 {
370  GROW_ARRAY(vfilters_list, nb_vfilters);
371  vfilters_list[nb_vfilters - 1] = arg;
372  return 0;
373 }
374 #endif
375 
376 static inline
377 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
378  enum AVSampleFormat fmt2, int64_t channel_count2)
379 {
380  /* If channel count == 1, planar and non-planar formats are the same */
381  if (channel_count1 == 1 && channel_count2 == 1)
383  else
384  return channel_count1 != channel_count2 || fmt1 != fmt2;
385 }
386 
387 static inline
388 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
389 {
390  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
391  return channel_layout;
392  else
393  return 0;
394 }
395 
396 static void free_picture(Frame *vp);
397 
398 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
399 {
400  MyAVPacketList *pkt1;
401 
402  if (q->abort_request)
403  return -1;
404 
405  pkt1 = av_malloc(sizeof(MyAVPacketList));
406  if (!pkt1)
407  return -1;
408  pkt1->pkt = *pkt;
409  pkt1->next = NULL;
410  if (pkt == &flush_pkt)
411  q->serial++;
412  pkt1->serial = q->serial;
413 
414  if (!q->last_pkt)
415  q->first_pkt = pkt1;
416  else
417  q->last_pkt->next = pkt1;
418  q->last_pkt = pkt1;
419  q->nb_packets++;
420  q->size += pkt1->pkt.size + sizeof(*pkt1);
421  /* XXX: should duplicate packet data in DV case */
422  SDL_CondSignal(q->cond);
423  return 0;
424 }
425 
426 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
427 {
428  int ret;
429 
430  SDL_LockMutex(q->mutex);
431  ret = packet_queue_put_private(q, pkt);
432  SDL_UnlockMutex(q->mutex);
433 
434  if (pkt != &flush_pkt && ret < 0)
435  av_packet_unref(pkt);
436 
437  return ret;
438 }
439 
440 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
441 {
442  AVPacket pkt1, *pkt = &pkt1;
443  av_init_packet(pkt);
444  pkt->data = NULL;
445  pkt->size = 0;
446  pkt->stream_index = stream_index;
447  return packet_queue_put(q, pkt);
448 }
449 
450 /* packet queue handling */
451 static int packet_queue_init(PacketQueue *q)
452 {
453  memset(q, 0, sizeof(PacketQueue));
454  q->mutex = SDL_CreateMutex();
455  if (!q->mutex) {
456  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
457  return AVERROR(ENOMEM);
458  }
459  q->cond = SDL_CreateCond();
460  if (!q->cond) {
461  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
462  return AVERROR(ENOMEM);
463  }
464  q->abort_request = 1;
465  return 0;
466 }
467 
468 static void packet_queue_flush(PacketQueue *q)
469 {
470  MyAVPacketList *pkt, *pkt1;
471 
472  SDL_LockMutex(q->mutex);
473  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
474  pkt1 = pkt->next;
475  av_packet_unref(&pkt->pkt);
476  av_freep(&pkt);
477  }
478  q->last_pkt = NULL;
479  q->first_pkt = NULL;
480  q->nb_packets = 0;
481  q->size = 0;
482  SDL_UnlockMutex(q->mutex);
483 }
484 
485 static void packet_queue_destroy(PacketQueue *q)
486 {
487  packet_queue_flush(q);
488  SDL_DestroyMutex(q->mutex);
489  SDL_DestroyCond(q->cond);
490 }
491 
492 static void packet_queue_abort(PacketQueue *q)
493 {
494  SDL_LockMutex(q->mutex);
495 
496  q->abort_request = 1;
497 
498  SDL_CondSignal(q->cond);
499 
500  SDL_UnlockMutex(q->mutex);
501 }
502 
503 static void packet_queue_start(PacketQueue *q)
504 {
505  SDL_LockMutex(q->mutex);
506  q->abort_request = 0;
507  packet_queue_put_private(q, &flush_pkt);
508  SDL_UnlockMutex(q->mutex);
509 }
510 
511 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
512 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
513 {
514  MyAVPacketList *pkt1;
515  int ret;
516 
517  SDL_LockMutex(q->mutex);
518 
519  for (;;) {
520  if (q->abort_request) {
521  ret = -1;
522  break;
523  }
524 
525  pkt1 = q->first_pkt;
526  if (pkt1) {
527  q->first_pkt = pkt1->next;
528  if (!q->first_pkt)
529  q->last_pkt = NULL;
530  q->nb_packets--;
531  q->size -= pkt1->pkt.size + sizeof(*pkt1);
532  *pkt = pkt1->pkt;
533  if (serial)
534  *serial = pkt1->serial;
535  av_free(pkt1);
536  ret = 1;
537  break;
538  } else if (!block) {
539  ret = 0;
540  break;
541  } else {
542  SDL_CondWait(q->cond, q->mutex);
543  }
544  }
545  SDL_UnlockMutex(q->mutex);
546  return ret;
547 }
548 
549 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
550  memset(d, 0, sizeof(Decoder));
551  d->avctx = avctx;
552  d->queue = queue;
553  d->empty_queue_cond = empty_queue_cond;
554  d->start_pts = AV_NOPTS_VALUE;
555 }
556 
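/* Pull packets from the queue and decode until a frame (or subtitle) is produced.
 * Returns a positive value when a frame was decoded, 0 once the stream is drained
 * (finished is set to the current serial), and -1 on abort.  A flush packet resets
 * the codec with avcodec_flush_buffers() and restarts pts prediction; packets whose
 * serial no longer matches the queue are skipped. */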
557 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
558  int got_frame = 0;
559 
560  do {
561  int ret = -1;
562 
563  if (d->queue->abort_request)
564  return -1;
565 
566  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
567  AVPacket pkt;
568  do {
569  if (d->queue->nb_packets == 0)
570  SDL_CondSignal(d->empty_queue_cond);
571  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
572  return -1;
573  if (pkt.data == flush_pkt.data) {
574  avcodec_flush_buffers(d->avctx);
575  d->finished = 0;
576  d->next_pts = d->start_pts;
577  d->next_pts_tb = d->start_pts_tb;
578  }
579  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
580  av_packet_unref(&d->pkt);
581  d->pkt_temp = d->pkt = pkt;
582  d->packet_pending = 1;
583  }
584 
585  switch (d->avctx->codec_type) {
586  case AVMEDIA_TYPE_VIDEO:
587  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
588  if (got_frame) {
589  if (decoder_reorder_pts == -1) {
590  frame->pts = av_frame_get_best_effort_timestamp(frame);
591  } else if (decoder_reorder_pts) {
592  frame->pts = frame->pkt_pts;
593  } else {
594  frame->pts = frame->pkt_dts;
595  }
596  }
597  break;
598  case AVMEDIA_TYPE_AUDIO:
599  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
600  if (got_frame) {
601  AVRational tb = (AVRational){1, frame->sample_rate};
602  if (frame->pts != AV_NOPTS_VALUE)
603  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
604  else if (frame->pkt_pts != AV_NOPTS_VALUE)
605  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
606  else if (d->next_pts != AV_NOPTS_VALUE)
607  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
608  if (frame->pts != AV_NOPTS_VALUE) {
609  d->next_pts = frame->pts + frame->nb_samples;
610  d->next_pts_tb = tb;
611  }
612  }
613  break;
614  case AVMEDIA_TYPE_SUBTITLE:
615  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
616  break;
617  }
618 
619  if (ret < 0) {
620  d->packet_pending = 0;
621  } else {
622  d->pkt_temp.dts =
623  d->pkt_temp.pts = AV_NOPTS_VALUE;
624  if (d->pkt_temp.data) {
625  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
626  ret = d->pkt_temp.size;
627  d->pkt_temp.data += ret;
628  d->pkt_temp.size -= ret;
629  if (d->pkt_temp.size <= 0)
630  d->packet_pending = 0;
631  } else {
632  if (!got_frame) {
633  d->packet_pending = 0;
634  d->finished = d->pkt_serial;
635  }
636  }
637  }
638  } while (!got_frame && !d->finished);
639 
640  return got_frame;
641 }
642 
643 static void decoder_destroy(Decoder *d) {
644  av_packet_unref(&d->pkt);
645 }
646 
647 static void frame_queue_unref_item(Frame *vp)
648 {
649  int i;
650  for (i = 0; i < vp->sub.num_rects; i++) {
651  av_freep(&vp->subrects[i]->data[0]);
652  av_freep(&vp->subrects[i]);
653  }
654  av_freep(&vp->subrects);
655  av_frame_unref(vp->frame);
656  avsubtitle_free(&vp->sub);
657 }
658 
659 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
660 {
661  int i;
662  memset(f, 0, sizeof(FrameQueue));
663  if (!(f->mutex = SDL_CreateMutex())) {
664  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
665  return AVERROR(ENOMEM);
666  }
667  if (!(f->cond = SDL_CreateCond())) {
668  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
669  return AVERROR(ENOMEM);
670  }
671  f->pktq = pktq;
672  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
673  f->keep_last = !!keep_last;
674  for (i = 0; i < f->max_size; i++)
675  if (!(f->queue[i].frame = av_frame_alloc()))
676  return AVERROR(ENOMEM);
677  return 0;
678 }
679 
680 static void frame_queue_destory(FrameQueue *f)
681 {
682  int i;
683  for (i = 0; i < f->max_size; i++) {
684  Frame *vp = &f->queue[i];
685  frame_queue_unref_item(vp);
686  av_frame_free(&vp->frame);
687  free_picture(vp);
688  }
689  SDL_DestroyMutex(f->mutex);
690  SDL_DestroyCond(f->cond);
691 }
692 
693 static void frame_queue_signal(FrameQueue *f)
694 {
695  SDL_LockMutex(f->mutex);
696  SDL_CondSignal(f->cond);
697  SDL_UnlockMutex(f->mutex);
698 }
699 
700 static Frame *frame_queue_peek(FrameQueue *f)
701 {
702  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
703 }
704 
705 static Frame *frame_queue_peek_next(FrameQueue *f)
706 {
707  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
708 }
709 
710 static Frame *frame_queue_peek_last(FrameQueue *f)
711 {
712  return &f->queue[f->rindex];
713 }
714 
715 static Frame *frame_queue_peek_writable(FrameQueue *f)
716 {
717  /* wait until we have space to put a new frame */
718  SDL_LockMutex(f->mutex);
719  while (f->size >= f->max_size &&
720  !f->pktq->abort_request) {
721  SDL_CondWait(f->cond, f->mutex);
722  }
723  SDL_UnlockMutex(f->mutex);
724 
725  if (f->pktq->abort_request)
726  return NULL;
727 
728  return &f->queue[f->windex];
729 }
730 
731 static Frame *frame_queue_peek_readable(FrameQueue *f)
732 {
733  /* wait until we have a readable new frame */
734  SDL_LockMutex(f->mutex);
735  while (f->size - f->rindex_shown <= 0 &&
736  !f->pktq->abort_request) {
737  SDL_CondWait(f->cond, f->mutex);
738  }
739  SDL_UnlockMutex(f->mutex);
740 
741  if (f->pktq->abort_request)
742  return NULL;
743 
744  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
745 }
746 
747 static void frame_queue_push(FrameQueue *f)
748 {
749  if (++f->windex == f->max_size)
750  f->windex = 0;
751  SDL_LockMutex(f->mutex);
752  f->size++;
753  SDL_CondSignal(f->cond);
754  SDL_UnlockMutex(f->mutex);
755 }
756 
757 static void frame_queue_next(FrameQueue *f)
758 {
759  if (f->keep_last && !f->rindex_shown) {
760  f->rindex_shown = 1;
761  return;
762  }
763  frame_queue_unref_item(&f->queue[f->rindex]);
764  if (++f->rindex == f->max_size)
765  f->rindex = 0;
766  SDL_LockMutex(f->mutex);
767  f->size--;
768  SDL_CondSignal(f->cond);
769  SDL_UnlockMutex(f->mutex);
770 }
771 
772 /* jump back to the previous frame if available by resetting rindex_shown */
773 static int frame_queue_prev(FrameQueue *f)
774 {
775  int ret = f->rindex_shown;
776  f->rindex_shown = 0;
777  return ret;
778 }
779 
780 /* return the number of undisplayed frames in the queue */
781 static int frame_queue_nb_remaining(FrameQueue *f)
782 {
783  return f->size - f->rindex_shown;
784 }
785 
786 /* return last shown position */
787 static int64_t frame_queue_last_pos(FrameQueue *f)
788 {
789  Frame *fp = &f->queue[f->rindex];
790  if (f->rindex_shown && fp->serial == f->pktq->serial)
791  return fp->pos;
792  else
793  return -1;
794 }
795 
796 static void decoder_abort(Decoder *d, FrameQueue *fq)
797 {
798  packet_queue_abort(d->queue);
799  frame_queue_signal(fq);
800  SDL_WaitThread(d->decoder_tid, NULL);
801  d->decoder_tid = NULL;
802  packet_queue_flush(d->queue);
803 }
804 
805 static inline void fill_rectangle(SDL_Surface *screen,
806  int x, int y, int w, int h, int color, int update)
807 {
808  SDL_Rect rect;
809  rect.x = x;
810  rect.y = y;
811  rect.w = w;
812  rect.h = h;
813  SDL_FillRect(screen, &rect, color);
814  if (update && w > 0 && h > 0)
815  SDL_UpdateRect(screen, x, y, w, h);
816 }
817 
818 /* draw only the border of a rectangle */
819 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
820 {
821  int w1, w2, h1, h2;
822 
823  /* fill the background */
824  w1 = x;
825  if (w1 < 0)
826  w1 = 0;
827  w2 = width - (x + w);
828  if (w2 < 0)
829  w2 = 0;
830  h1 = y;
831  if (h1 < 0)
832  h1 = 0;
833  h2 = height - (y + h);
834  if (h2 < 0)
835  h2 = 0;
836  fill_rectangle(screen,
837  xleft, ytop,
838  w1, height,
839  color, update);
840  fill_rectangle(screen,
841  xleft + width - w2, ytop,
842  w2, height,
843  color, update);
844  fill_rectangle(screen,
845  xleft + w1, ytop,
846  width - w1 - w2, h1,
847  color, update);
848  fill_rectangle(screen,
849  xleft + w1, ytop + height - h2,
850  width - w1 - w2, h2,
851  color, update);
852 }
853 
854 #define ALPHA_BLEND(a, oldp, newp, s)\
855 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
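/* Integer alpha blend: mix oldp and newp with weight a out of 255.  The shift s
 * allows oldp to be passed at reduced precision; in this file it is always used
 * with s = 0, i.e. (oldp * (255 - a) + newp * a) / 255. */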
856 
857 
858 
859 #define BPP 1
860 
861 static void blend_subrect(uint8_t **data, int *linesize, const AVSubtitleRect *rect, int imgw, int imgh)
862 {
863  int x, y, Y, U, V, A;
864  uint8_t *lum, *cb, *cr;
865  int dstx, dsty, dstw, dsth;
866  const AVSubtitleRect *src = rect;
867 
868  dstw = av_clip(rect->w, 0, imgw);
869  dsth = av_clip(rect->h, 0, imgh);
870  dstx = av_clip(rect->x, 0, imgw - dstw);
871  dsty = av_clip(rect->y, 0, imgh - dsth);
872  lum = data[0] + dstx + dsty * linesize[0];
873  cb = data[1] + dstx/2 + (dsty >> 1) * linesize[1];
874  cr = data[2] + dstx/2 + (dsty >> 1) * linesize[2];
875 
876  for (y = 0; y<dsth; y++) {
877  for (x = 0; x<dstw; x++) {
878  Y = src->data[0][x + y*src->linesize[0]];
879  A = src->data[3][x + y*src->linesize[3]];
880  lum[0] = ALPHA_BLEND(A, lum[0], Y, 0);
881  lum++;
882  }
883  lum += linesize[0] - dstw;
884  }
885 
886  for (y = 0; y<dsth/2; y++) {
887  for (x = 0; x<dstw/2; x++) {
888  U = src->data[1][x + y*src->linesize[1]];
889  V = src->data[2][x + y*src->linesize[2]];
890  A = src->data[3][2*x + 2*y *src->linesize[3]]
891  + src->data[3][2*x + 1 + 2*y *src->linesize[3]]
892  + src->data[3][2*x + 1 + (2*y+1)*src->linesize[3]]
893  + src->data[3][2*x + (2*y+1)*src->linesize[3]];
894  cb[0] = ALPHA_BLEND(A>>2, cb[0], U, 0);
895  cr[0] = ALPHA_BLEND(A>>2, cr[0], V, 0);
896  cb++;
897  cr++;
898  }
899  cb += linesize[1] - dstw/2;
900  cr += linesize[2] - dstw/2;
901  }
902 }
903 
904 static void free_picture(Frame *vp)
905 {
906  if (vp->bmp) {
907  SDL_FreeYUVOverlay(vp->bmp);
908  vp->bmp = NULL;
909  }
910 }
911 
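/* Compute the largest rectangle that fits in the given screen area while keeping
 * the picture's display aspect ratio (sample aspect ratio times width/height),
 * centered, with width and height rounded down to even values. */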
912 static void calculate_display_rect(SDL_Rect *rect,
913  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
914  int pic_width, int pic_height, AVRational pic_sar)
915 {
916  float aspect_ratio;
917  int width, height, x, y;
918 
919  if (pic_sar.num == 0)
920  aspect_ratio = 0;
921  else
922  aspect_ratio = av_q2d(pic_sar);
923 
924  if (aspect_ratio <= 0.0)
925  aspect_ratio = 1.0;
926  aspect_ratio *= (float)pic_width / (float)pic_height;
927 
928  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
929  height = scr_height;
930  width = lrint(height * aspect_ratio) & ~1;
931  if (width > scr_width) {
932  width = scr_width;
933  height = lrint(width / aspect_ratio) & ~1;
934  }
935  x = (scr_width - width) / 2;
936  y = (scr_height - height) / 2;
937  rect->x = scr_xleft + x;
938  rect->y = scr_ytop + y;
939  rect->w = FFMAX(width, 1);
940  rect->h = FFMAX(height, 1);
941 }
942 
943 static void video_image_display(VideoState *is)
944 {
945  Frame *vp;
946  Frame *sp;
947  SDL_Rect rect;
948  int i;
949 
950  vp = frame_queue_peek(&is->pictq);
951  if (vp->bmp) {
952  if (is->subtitle_st) {
953  if (frame_queue_nb_remaining(&is->subpq) > 0) {
954  sp = frame_queue_peek(&is->subpq);
955 
956  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
957  uint8_t *data[4];
958  int linesize[4];
959 
960  SDL_LockYUVOverlay (vp->bmp);
961 
962  data[0] = vp->bmp->pixels[0];
963  data[1] = vp->bmp->pixels[2];
964  data[2] = vp->bmp->pixels[1];
965 
966  linesize[0] = vp->bmp->pitches[0];
967  linesize[1] = vp->bmp->pitches[2];
968  linesize[2] = vp->bmp->pitches[1];
969 
970  for (i = 0; i < sp->sub.num_rects; i++)
971  blend_subrect(data, linesize, sp->subrects[i],
972  vp->bmp->w, vp->bmp->h);
973 
974  SDL_UnlockYUVOverlay (vp->bmp);
975  }
976  }
977  }
978 
979  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
980 
981  SDL_DisplayYUVOverlay(vp->bmp, &rect);
982 
983  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
984  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
985  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
986  is->last_display_rect = rect;
987  }
988  }
989 }
990 
991 static inline int compute_mod(int a, int b)
992 {
993  return a < 0 ? a%b + b : a%b;
994 }
995 
996 static void video_audio_display(VideoState *s)
997 {
998  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
999  int ch, channels, h, h2, bgcolor, fgcolor;
1000  int64_t time_diff;
1001  int rdft_bits, nb_freq;
1002 
1003  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1004  ;
1005  nb_freq = 1 << (rdft_bits - 1);
1006 
1007  /* compute display index : center on currently output samples */
1008  channels = s->audio_tgt.channels;
1009  nb_display_channels = channels;
1010  if (!s->paused) {
1011  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1012  n = 2 * channels;
1013  delay = s->audio_write_buf_size;
1014  delay /= n;
1015 
1016  /* to be more precise, we take into account the time spent since
1017  the last buffer computation */
1018  if (audio_callback_time) {
1019  time_diff = av_gettime_relative() - audio_callback_time;
1020  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1021  }
1022 
1023  delay += 2 * data_used;
1024  if (delay < data_used)
1025  delay = data_used;
1026 
1027  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1028  if (s->show_mode == SHOW_MODE_WAVES) {
1029  h = INT_MIN;
1030  for (i = 0; i < 1000; i += channels) {
1031  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1032  int a = s->sample_array[idx];
1033  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1034  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1035  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1036  int score = a - d;
1037  if (h < score && (b ^ c) < 0) {
1038  h = score;
1039  i_start = idx;
1040  }
1041  }
1042  }
1043 
1044  s->last_i_start = i_start;
1045  } else {
1046  i_start = s->last_i_start;
1047  }
1048 
1049  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1050  if (s->show_mode == SHOW_MODE_WAVES) {
1051  fill_rectangle(screen,
1052  s->xleft, s->ytop, s->width, s->height,
1053  bgcolor, 0);
1054 
1055  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1056 
1057  /* total height for one channel */
1058  h = s->height / nb_display_channels;
1059  /* graph height / 2 */
1060  h2 = (h * 9) / 20;
1061  for (ch = 0; ch < nb_display_channels; ch++) {
1062  i = i_start + ch;
1063  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1064  for (x = 0; x < s->width; x++) {
1065  y = (s->sample_array[i] * h2) >> 15;
1066  if (y < 0) {
1067  y = -y;
1068  ys = y1 - y;
1069  } else {
1070  ys = y1;
1071  }
1072  fill_rectangle(screen,
1073  s->xleft + x, ys, 1, y,
1074  fgcolor, 0);
1075  i += channels;
1076  if (i >= SAMPLE_ARRAY_SIZE)
1077  i -= SAMPLE_ARRAY_SIZE;
1078  }
1079  }
1080 
1081  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1082 
1083  for (ch = 1; ch < nb_display_channels; ch++) {
1084  y = s->ytop + ch * h;
1085  fill_rectangle(screen,
1086  s->xleft, y, s->width, 1,
1087  fgcolor, 0);
1088  }
1089  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1090  } else {
1091  nb_display_channels= FFMIN(nb_display_channels, 2);
1092  if (rdft_bits != s->rdft_bits) {
1093  av_rdft_end(s->rdft);
1094  av_free(s->rdft_data);
1095  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1096  s->rdft_bits = rdft_bits;
1097  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1098  }
1099  if (!s->rdft || !s->rdft_data){
1100  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1101  s->show_mode = SHOW_MODE_WAVES;
1102  } else {
1103  FFTSample *data[2];
1104  for (ch = 0; ch < nb_display_channels; ch++) {
1105  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1106  i = i_start + ch;
1107  for (x = 0; x < 2 * nb_freq; x++) {
1108  double w = (x-nb_freq) * (1.0 / nb_freq);
1109  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1110  i += channels;
1111  if (i >= SAMPLE_ARRAY_SIZE)
1112  i -= SAMPLE_ARRAY_SIZE;
1113  }
1114  av_rdft_calc(s->rdft, data[ch]);
1115  }
1116  /* This is the least efficient way to do this; we should of course
1117  * access the data directly, but it is more than fast enough. */
1118  for (y = 0; y < s->height; y++) {
1119  double w = 1 / sqrt(nb_freq);
1120  int a = sqrt(w * hypot(data[0][2 * y + 0], data[0][2 * y + 1]));
1121  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1122  : a;
1123  a = FFMIN(a, 255);
1124  b = FFMIN(b, 255);
1125  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1126 
1127  fill_rectangle(screen,
1128  s->xpos, s->height-y, 1, 1,
1129  fgcolor, 0);
1130  }
1131  }
1132  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1133  if (!s->paused)
1134  s->xpos++;
1135  if (s->xpos >= s->width)
1136  s->xpos= s->xleft;
1137  }
1138 }
1139 
1140 static void stream_component_close(VideoState *is, int stream_index)
1141 {
1142  AVFormatContext *ic = is->ic;
1143  AVCodecContext *avctx;
1144 
1145  if (stream_index < 0 || stream_index >= ic->nb_streams)
1146  return;
1147  avctx = ic->streams[stream_index]->codec;
1148 
1149  switch (avctx->codec_type) {
1150  case AVMEDIA_TYPE_AUDIO:
1151  decoder_abort(&is->auddec, &is->sampq);
1152  SDL_CloseAudio();
1153  decoder_destroy(&is->auddec);
1154  swr_free(&is->swr_ctx);
1155  av_freep(&is->audio_buf1);
1156  is->audio_buf1_size = 0;
1157  is->audio_buf = NULL;
1158 
1159  if (is->rdft) {
1160  av_rdft_end(is->rdft);
1161  av_freep(&is->rdft_data);
1162  is->rdft = NULL;
1163  is->rdft_bits = 0;
1164  }
1165  break;
1166  case AVMEDIA_TYPE_VIDEO:
1167  decoder_abort(&is->viddec, &is->pictq);
1168  decoder_destroy(&is->viddec);
1169  break;
1170  case AVMEDIA_TYPE_SUBTITLE:
1171  decoder_abort(&is->subdec, &is->subpq);
1172  decoder_destroy(&is->subdec);
1173  break;
1174  default:
1175  break;
1176  }
1177 
1178  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1179  avcodec_close(avctx);
1180  switch (avctx->codec_type) {
1181  case AVMEDIA_TYPE_AUDIO:
1182  is->audio_st = NULL;
1183  is->audio_stream = -1;
1184  break;
1185  case AVMEDIA_TYPE_VIDEO:
1186  is->video_st = NULL;
1187  is->video_stream = -1;
1188  break;
1189  case AVMEDIA_TYPE_SUBTITLE:
1190  is->subtitle_st = NULL;
1191  is->subtitle_stream = -1;
1192  break;
1193  default:
1194  break;
1195  }
1196 }
1197 
1198 static void stream_close(VideoState *is)
1199 {
1200  /* XXX: use a special url_shutdown call to abort parse cleanly */
1201  is->abort_request = 1;
1202  SDL_WaitThread(is->read_tid, NULL);
1203 
1204  /* close each stream */
1205  if (is->audio_stream >= 0)
1206  stream_component_close(is, is->audio_stream);
1207  if (is->video_stream >= 0)
1208  stream_component_close(is, is->video_stream);
1209  if (is->subtitle_stream >= 0)
1210  stream_component_close(is, is->subtitle_stream);
1211 
1212  avformat_close_input(&is->ic);
1213 
1214  packet_queue_destroy(&is->videoq);
1215  packet_queue_destroy(&is->audioq);
1216  packet_queue_destroy(&is->subtitleq);
1217 
1218  /* free all pictures */
1219  frame_queue_destory(&is->pictq);
1220  frame_queue_destory(&is->sampq);
1221  frame_queue_destory(&is->subpq);
1222  SDL_DestroyCond(is->continue_read_thread);
1223 #if !CONFIG_AVFILTER
1224  sws_freeContext(is->img_convert_ctx);
1225 #endif
1227  av_free(is->filename);
1228  av_free(is);
1229 }
1230 
1231 static void do_exit(VideoState *is)
1232 {
1233  if (is) {
1234  stream_close(is);
1235  }
1236  av_lockmgr_register(NULL);
1237  uninit_opts();
1238 #if CONFIG_AVFILTER
1239  av_freep(&vfilters_list);
1240 #endif
1241  avformat_network_deinit();
1242  if (show_status)
1243  printf("\n");
1244  SDL_Quit();
1245  av_log(NULL, AV_LOG_QUIET, "%s", "");
1246  exit(0);
1247 }
1248 
1249 static void sigterm_handler(int sig)
1250 {
1251  exit(123);
1252 }
1253 
1254 static void set_default_window_size(int width, int height, AVRational sar)
1255 {
1256  SDL_Rect rect;
1257  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1258  default_width = rect.w;
1259  default_height = rect.h;
1260 }
1261 
1262 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1263 {
1264  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1265  int w,h;
1266 
1267  if (is_full_screen) flags |= SDL_FULLSCREEN;
1268  else flags |= SDL_RESIZABLE;
1269 
1270  if (vp && vp->width)
1271  set_default_window_size(vp->width, vp->height, vp->sar);
1272 
1273  if (is_full_screen && fs_screen_width) {
1274  w = fs_screen_width;
1275  h = fs_screen_height;
1276  } else if (!is_full_screen && screen_width) {
1277  w = screen_width;
1278  h = screen_height;
1279  } else {
1280  w = default_width;
1281  h = default_height;
1282  }
1283  w = FFMIN(16383, w);
1284  if (screen && is->width == screen->w && screen->w == w
1285  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1286  return 0;
1287  screen = SDL_SetVideoMode(w, h, 0, flags);
1288  if (!screen) {
1289  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1290  do_exit(is);
1291  }
1292  if (!window_title)
1293  window_title = input_filename;
1294  SDL_WM_SetCaption(window_title, window_title);
1295 
1296  is->width = screen->w;
1297  is->height = screen->h;
1298 
1299  return 0;
1300 }
1301 
1302 /* display the current picture, if any */
1303 static void video_display(VideoState *is)
1304 {
1305  if (!screen)
1306  video_open(is, 0, NULL);
1307  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1308  video_audio_display(is);
1309  else if (is->video_st)
1310  video_image_display(is);
1311 }
1312 
1313 static double get_clock(Clock *c)
1314 {
1315  if (*c->queue_serial != c->serial)
1316  return NAN;
1317  if (c->paused) {
1318  return c->pts;
1319  } else {
1320  double time = av_gettime_relative() / 1000000.0;
1321  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1322  }
1323 }
1324 
1325 static void set_clock_at(Clock *c, double pts, int serial, double time)
1326 {
1327  c->pts = pts;
1328  c->last_updated = time;
1329  c->pts_drift = c->pts - time;
1330  c->serial = serial;
1331 }
1332 
1333 static void set_clock(Clock *c, double pts, int serial)
1334 {
1335  double time = av_gettime_relative() / 1000000.0;
1336  set_clock_at(c, pts, serial, time);
1337 }
1338 
1339 static void set_clock_speed(Clock *c, double speed)
1340 {
1341  set_clock(c, get_clock(c), c->serial);
1342  c->speed = speed;
1343 }
1344 
1345 static void init_clock(Clock *c, int *queue_serial)
1346 {
1347  c->speed = 1.0;
1348  c->paused = 0;
1349  c->queue_serial = queue_serial;
1350  set_clock(c, NAN, -1);
1351 }
1352 
1353 static void sync_clock_to_slave(Clock *c, Clock *slave)
1354 {
1355  double clock = get_clock(c);
1356  double slave_clock = get_clock(slave);
1357  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1358  set_clock(c, slave_clock, slave->serial);
1359 }
1360 
1361 static int get_master_sync_type(VideoState *is) {
1362  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1363  if (is->video_st)
1364  return AV_SYNC_VIDEO_MASTER;
1365  else
1366  return AV_SYNC_AUDIO_MASTER;
1367  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1368  if (is->audio_st)
1369  return AV_SYNC_AUDIO_MASTER;
1370  else
1371  return AV_SYNC_EXTERNAL_CLOCK;
1372  } else {
1373  return AV_SYNC_EXTERNAL_CLOCK;
1374  }
1375 }
1376 
1377 /* get the current master clock value */
1378 static double get_master_clock(VideoState *is)
1379 {
1380  double val;
1381 
1382  switch (get_master_sync_type(is)) {
1383  case AV_SYNC_VIDEO_MASTER:
1384  val = get_clock(&is->vidclk);
1385  break;
1386  case AV_SYNC_AUDIO_MASTER:
1387  val = get_clock(&is->audclk);
1388  break;
1389  default:
1390  val = get_clock(&is->extclk);
1391  break;
1392  }
1393  return val;
1394 }
1395 
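/* For realtime sources synchronized to the external clock, nudge the clock speed
 * down when the packet queues run nearly empty and up when they are well filled,
 * so the buffer level stays roughly constant instead of draining or growing. */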
1396 static void check_external_clock_speed(VideoState *is) {
1397  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1398  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1399  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1400  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1401  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1402  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1403  } else {
1404  double speed = is->extclk.speed;
1405  if (speed != 1.0)
1406  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1407  }
1408 }
1409 
1410 /* seek in the stream */
1411 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1412 {
1413  if (!is->seek_req) {
1414  is->seek_pos = pos;
1415  is->seek_rel = rel;
1416  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1417  if (seek_by_bytes)
1418  is->seek_flags |= AVSEEK_FLAG_BYTE;
1419  is->seek_req = 1;
1420  SDL_CondSignal(is->continue_read_thread);
1421  }
1422 }
1423 
1424 /* pause or resume the video */
1425 static void stream_toggle_pause(VideoState *is)
1426 {
1427  if (is->paused) {
1428  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1429  if (is->read_pause_return != AVERROR(ENOSYS)) {
1430  is->vidclk.paused = 0;
1431  }
1432  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1433  }
1434  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1435  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1436 }
1437 
1438 static void toggle_pause(VideoState *is)
1439 {
1440  stream_toggle_pause(is);
1441  is->step = 0;
1442 }
1443 
1444 static void toggle_mute(VideoState *is)
1445 {
1446  is->muted = !is->muted;
1447 }
1448 
1449 static void update_volume(VideoState *is, int sign, int step)
1450 {
1451  is->audio_volume = av_clip(is->audio_volume + sign * step, 0, SDL_MIX_MAXVOLUME);
1452 }
1453 
1454 static void step_to_next_frame(VideoState *is)
1455 {
1456  /* if the stream is paused unpause it, then step */
1457  if (is->paused)
1458  stream_toggle_pause(is);
1459  is->step = 1;
1460 }
1461 
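/* Given the nominal duration of the previously shown frame, adjust the delay we
 * actually wait so the video clock drifts back towards the master clock: shorten
 * it (down to 0) when video is late, lengthen or double it when video is ahead.
 * No adjustment is made when video itself is the master clock. */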
1462 static double compute_target_delay(double delay, VideoState *is)
1463 {
1464  double sync_threshold, diff = 0;
1465 
1466  /* update delay to follow master synchronisation source */
1467  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1468  /* if video is slave, we try to correct big delays by
1469  duplicating or deleting a frame */
1470  diff = get_clock(&is->vidclk) - get_master_clock(is);
1471 
1472  /* skip or repeat frame. We take into account the
1473  delay to compute the threshold. I still don't know
1474  if it is the best guess */
1475  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1476  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1477  if (diff <= -sync_threshold)
1478  delay = FFMAX(0, delay + diff);
1479  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1480  delay = delay + diff;
1481  else if (diff >= sync_threshold)
1482  delay = 2 * delay;
1483  }
1484  }
1485 
1486  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1487  delay, -diff);
1488 
1489  return delay;
1490 }
1491 
1492 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1493  if (vp->serial == nextvp->serial) {
1494  double duration = nextvp->pts - vp->pts;
1495  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1496  return vp->duration;
1497  else
1498  return duration;
1499  } else {
1500  return 0.0;
1501  }
1502 }
1503 
1504 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1505  /* update current video pts */
1506  set_clock(&is->vidclk, pts, serial);
1507  sync_clock_to_slave(&is->extclk, &is->vidclk);
1508 }
1509 
1510 /* called to display each frame */
1511 static void video_refresh(void *opaque, double *remaining_time)
1512 {
1513  VideoState *is = opaque;
1514  double time;
1515 
1516  Frame *sp, *sp2;
1517 
1518  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1519  check_external_clock_speed(is);
1520 
1521  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1522  time = av_gettime_relative() / 1000000.0;
1523  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1524  video_display(is);
1525  is->last_vis_time = time;
1526  }
1527  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1528  }
1529 
1530  if (is->video_st) {
1531  int redisplay = 0;
1532  if (is->force_refresh)
1533  redisplay = frame_queue_prev(&is->pictq);
1534 retry:
1535  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1536  // nothing to do, no picture to display in the queue
1537  } else {
1538  double last_duration, duration, delay;
1539  Frame *vp, *lastvp;
1540 
1541  /* dequeue the picture */
1542  lastvp = frame_queue_peek_last(&is->pictq);
1543  vp = frame_queue_peek(&is->pictq);
1544 
1545  if (vp->serial != is->videoq.serial) {
1546  frame_queue_next(&is->pictq);
1547  redisplay = 0;
1548  goto retry;
1549  }
1550 
1551  if (lastvp->serial != vp->serial && !redisplay)
1552  is->frame_timer = av_gettime_relative() / 1000000.0;
1553 
1554  if (is->paused)
1555  goto display;
1556 
1557  /* compute nominal last_duration */
1558  last_duration = vp_duration(is, lastvp, vp);
1559  if (redisplay)
1560  delay = 0.0;
1561  else
1562  delay = compute_target_delay(last_duration, is);
1563 
1564  time= av_gettime_relative()/1000000.0;
1565  if (time < is->frame_timer + delay && !redisplay) {
1566  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1567  return;
1568  }
1569 
1570  is->frame_timer += delay;
1571  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1572  is->frame_timer = time;
1573 
1574  SDL_LockMutex(is->pictq.mutex);
1575  if (!redisplay && !isnan(vp->pts))
1576  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1577  SDL_UnlockMutex(is->pictq.mutex);
1578 
1579  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1580  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1581  duration = vp_duration(is, vp, nextvp);
1582  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1583  if (!redisplay)
1584  is->frame_drops_late++;
1585  frame_queue_next(&is->pictq);
1586  redisplay = 0;
1587  goto retry;
1588  }
1589  }
1590 
1591  if (is->subtitle_st) {
1592  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1593  sp = frame_queue_peek(&is->subpq);
1594 
1595  if (frame_queue_nb_remaining(&is->subpq) > 1)
1596  sp2 = frame_queue_peek_next(&is->subpq);
1597  else
1598  sp2 = NULL;
1599 
1600  if (sp->serial != is->subtitleq.serial
1601  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1602  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1603  {
1604  frame_queue_next(&is->subpq);
1605  } else {
1606  break;
1607  }
1608  }
1609  }
1610 
1611 display:
1612  /* display picture */
1613  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1614  video_display(is);
1615 
1616  frame_queue_next(&is->pictq);
1617 
1618  if (is->step && !is->paused)
1619  stream_toggle_pause(is);
1620  }
1621  }
1622  is->force_refresh = 0;
1623  if (show_status) {
1624  static int64_t last_time;
1625  int64_t cur_time;
1626  int aqsize, vqsize, sqsize;
1627  double av_diff;
1628 
1629  cur_time = av_gettime_relative();
1630  if (!last_time || (cur_time - last_time) >= 30000) {
1631  aqsize = 0;
1632  vqsize = 0;
1633  sqsize = 0;
1634  if (is->audio_st)
1635  aqsize = is->audioq.size;
1636  if (is->video_st)
1637  vqsize = is->videoq.size;
1638  if (is->subtitle_st)
1639  sqsize = is->subtitleq.size;
1640  av_diff = 0;
1641  if (is->audio_st && is->video_st)
1642  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1643  else if (is->video_st)
1644  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1645  else if (is->audio_st)
1646  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1647  av_log(NULL, AV_LOG_INFO,
1648  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1649  get_master_clock(is),
1650  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1651  av_diff,
1652  is->frame_drops_early + is->frame_drops_late,
1653  aqsize / 1024,
1654  vqsize / 1024,
1655  sqsize,
1656  is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1657  is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1658  fflush(stdout);
1659  last_time = cur_time;
1660  }
1661  }
1662 }
1663 
1664 /* allocate a picture (this needs to be done in the main thread to avoid
1665  potential locking problems) */
1666 static void alloc_picture(VideoState *is)
1667 {
1668  Frame *vp;
1669  int64_t bufferdiff;
1670 
1671  vp = &is->pictq.queue[is->pictq.windex];
1672 
1673  free_picture(vp);
1674 
1675  video_open(is, 0, vp);
1676 
1677  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1678  SDL_YV12_OVERLAY,
1679  screen);
1680  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1681  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1682  /* SDL allocates a buffer smaller than requested if the video
1683  * overlay hardware is unable to support the requested size. */
1684  av_log(NULL, AV_LOG_FATAL,
1685  "Error: the video system does not support an image\n"
1686  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1687  "to reduce the image size.\n", vp->width, vp->height );
1688  do_exit(is);
1689  }
1690 
1691  SDL_LockMutex(is->pictq.mutex);
1692  vp->allocated = 1;
1693  SDL_CondSignal(is->pictq.cond);
1694  SDL_UnlockMutex(is->pictq.mutex);
1695 }
1696 
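/* SDL overlays may have a pitch larger than the visible width; copying each row's
 * last visible pixel into the first padding column keeps that padding from showing
 * up as a colored stripe at the right edge with some drivers (the PITCH_WORKAROUND
 * case noted where queue_picture() calls this). */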
1697 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1698  int i, width, height;
1699  Uint8 *p, *maxp;
1700  for (i = 0; i < 3; i++) {
1701  width = bmp->w;
1702  height = bmp->h;
1703  if (i > 0) {
1704  width >>= 1;
1705  height >>= 1;
1706  }
1707  if (bmp->pitches[i] > width) {
1708  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1709  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1710  *(p+1) = *p;
1711  }
1712  }
1713 }
1714 
1715 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1716 {
1717  Frame *vp;
1718 
1719 #if defined(DEBUG_SYNC) && 0
1720  printf("frame_type=%c pts=%0.3f\n",
1721  av_get_picture_type_char(src_frame->pict_type), pts);
1722 #endif
1723 
1724  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1725  return -1;
1726 
1727  vp->sar = src_frame->sample_aspect_ratio;
1728 
1729  /* alloc or resize hardware picture buffer */
1730  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1731  vp->width != src_frame->width ||
1732  vp->height != src_frame->height) {
1733  SDL_Event event;
1734 
1735  vp->allocated = 0;
1736  vp->reallocate = 0;
1737  vp->width = src_frame->width;
1738  vp->height = src_frame->height;
1739 
1740  /* the allocation must be done in the main thread to avoid
1741  locking problems. */
1742  event.type = FF_ALLOC_EVENT;
1743  event.user.data1 = is;
1744  SDL_PushEvent(&event);
1745 
1746  /* wait until the picture is allocated */
1747  SDL_LockMutex(is->pictq.mutex);
1748  while (!vp->allocated && !is->videoq.abort_request) {
1749  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1750  }
1751  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1752  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1753  while (!vp->allocated && !is->abort_request) {
1754  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1755  }
1756  }
1757  SDL_UnlockMutex(is->pictq.mutex);
1758 
1759  if (is->videoq.abort_request)
1760  return -1;
1761  }
1762 
1763  /* if the frame is not skipped, then display it */
1764  if (vp->bmp) {
1765  uint8_t *data[4];
1766  int linesize[4];
1767 
1768  /* get a pointer on the bitmap */
1769  SDL_LockYUVOverlay (vp->bmp);
1770 
1771  data[0] = vp->bmp->pixels[0];
1772  data[1] = vp->bmp->pixels[2];
1773  data[2] = vp->bmp->pixels[1];
1774 
1775  linesize[0] = vp->bmp->pitches[0];
1776  linesize[1] = vp->bmp->pitches[2];
1777  linesize[2] = vp->bmp->pitches[1];
1778 
1779 #if CONFIG_AVFILTER
1780  // FIXME use direct rendering
1781  av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
1782  src_frame->format, vp->width, vp->height);
1783 #else
1784  {
1785  AVDictionaryEntry *e = av_dict_get(sws_dict, "sws_flags", NULL, 0);
1786  if (e) {
1787  const AVClass *class = sws_get_class();
1788  const AVOption *o = av_opt_find(&class, "sws_flags", NULL, 0,
1789  AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ);
1790  int ret = av_opt_eval_flags(&class, o, e->value, &sws_flags);
1791  if (ret < 0)
1792  exit(1);
1793  }
1794  }
1795 
1796  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1797  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1798  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1799  if (!is->img_convert_ctx) {
1800  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1801  exit(1);
1802  }
1803  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1804  0, vp->height, data, linesize);
1805 #endif
1806  /* workaround SDL PITCH_WORKAROUND */
1807  duplicate_right_border_pixels(vp->bmp);
1808  /* update the bitmap content */
1809  SDL_UnlockYUVOverlay(vp->bmp);
1810 
1811  vp->pts = pts;
1812  vp->duration = duration;
1813  vp->pos = pos;
1814  vp->serial = serial;
1815 
1816  /* now we can update the picture count */
1817  frame_queue_push(&is->pictq);
1818  }
1819  return 0;
1820 }
1821 
1822 static int get_video_frame(VideoState *is, AVFrame *frame)
1823 {
1824  int got_picture;
1825 
1826  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1827  return -1;
1828 
1829  if (got_picture) {
1830  double dpts = NAN;
1831 
1832  if (frame->pts != AV_NOPTS_VALUE)
1833  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1834 
1835  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1836 
1837  is->viddec_width = frame->width;
1838  is->viddec_height = frame->height;
1839 
1840  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1841  if (frame->pts != AV_NOPTS_VALUE) {
1842  double diff = dpts - get_master_clock(is);
1843  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1844  diff - is->frame_last_filter_delay < 0 &&
1845  is->viddec.pkt_serial == is->vidclk.serial &&
1846  is->videoq.nb_packets) {
1847  is->frame_drops_early++;
1848  av_frame_unref(frame);
1849  got_picture = 0;
1850  }
1851  }
1852  }
1853  }
1854 
1855  return got_picture;
1856 }
1857 
1858 #if CONFIG_AVFILTER
1859 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1860  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1861 {
1862  int ret, i;
1863  int nb_filters = graph->nb_filters;
1864  AVFilterInOut *outputs = NULL, *inputs = NULL;
1865 
1866  if (filtergraph) {
1867  outputs = avfilter_inout_alloc();
1868  inputs = avfilter_inout_alloc();
1869  if (!outputs || !inputs) {
1870  ret = AVERROR(ENOMEM);
1871  goto fail;
1872  }
1873 
1874  outputs->name = av_strdup("in");
1875  outputs->filter_ctx = source_ctx;
1876  outputs->pad_idx = 0;
1877  outputs->next = NULL;
1878 
1879  inputs->name = av_strdup("out");
1880  inputs->filter_ctx = sink_ctx;
1881  inputs->pad_idx = 0;
1882  inputs->next = NULL;
1883 
1884  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1885  goto fail;
1886  } else {
1887  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1888  goto fail;
1889  }
1890 
1891  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1892  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1893  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1894 
1895  ret = avfilter_graph_config(graph, NULL);
1896 fail:
1897  avfilter_inout_free(&outputs);
1898  avfilter_inout_free(&inputs);
1899  return ret;
1900 }
1901 
1902 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1903 {
1904  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1905  char sws_flags_str[512] = "";
1906  char buffersrc_args[256];
1907  int ret;
1908  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1909  AVCodecContext *codec = is->video_st->codec;
1910  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1911  AVDictionaryEntry *e = NULL;
1912 
1913  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1914  if (!strcmp(e->key, "sws_flags")) {
1915  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1916  } else
1917  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1918  }
1919  if (strlen(sws_flags_str))
1920  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1921 
1922  graph->scale_sws_opts = av_strdup(sws_flags_str);
1923 
1924  snprintf(buffersrc_args, sizeof(buffersrc_args),
1925  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1926  frame->width, frame->height, frame->format,
1927  is->video_st->time_base.num, is->video_st->time_base.den,
1928  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1929  if (fr.num && fr.den)
1930  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1931 
1932  if ((ret = avfilter_graph_create_filter(&filt_src,
1933  avfilter_get_by_name("buffer"),
1934  "ffplay_buffer", buffersrc_args, NULL,
1935  graph)) < 0)
1936  goto fail;
1937 
1938  ret = avfilter_graph_create_filter(&filt_out,
1939  avfilter_get_by_name("buffersink"),
1940  "ffplay_buffersink", NULL, NULL, graph);
1941  if (ret < 0)
1942  goto fail;
1943 
1944  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1945  goto fail;
1946 
1947  last_filter = filt_out;
1948 
1949 /* Note: this macro adds a filter before the most recently added filter, so the
1950  * processing order of the filters is reversed */
1951 #define INSERT_FILT(name, arg) do { \
1952  AVFilterContext *filt_ctx; \
1953  \
1954  ret = avfilter_graph_create_filter(&filt_ctx, \
1955  avfilter_get_by_name(name), \
1956  "ffplay_" name, arg, NULL, graph); \
1957  if (ret < 0) \
1958  goto fail; \
1959  \
1960  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1961  if (ret < 0) \
1962  goto fail; \
1963  \
1964  last_filter = filt_ctx; \
1965 } while (0)
1966 
1967  /* The SDL YUV code does not handle odd width/height for some driver
1968  * combinations, therefore we crop the picture to an even width/height. */
1969  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
1970 
1971  if (autorotate) {
1972  double theta = get_rotation(is->video_st);
1973 
1974  if (fabs(theta - 90) < 1.0) {
1975  INSERT_FILT("transpose", "clock");
1976  } else if (fabs(theta - 180) < 1.0) {
1977  INSERT_FILT("hflip", NULL);
1978  INSERT_FILT("vflip", NULL);
1979  } else if (fabs(theta - 270) < 1.0) {
1980  INSERT_FILT("transpose", "cclock");
1981  } else if (fabs(theta) > 1.0) {
1982  char rotate_buf[64];
1983  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1984  INSERT_FILT("rotate", rotate_buf);
1985  }
1986  }
1987 
1988  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1989  goto fail;
1990 
1991  is->in_video_filter = filt_src;
1992  is->out_video_filter = filt_out;
1993 
1994 fail:
1995  return ret;
1996 }
1997 
1998 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1999 {
2000  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
2001  int sample_rates[2] = { 0, -1 };
2002  int64_t channel_layouts[2] = { 0, -1 };
2003  int channels[2] = { 0, -1 };
2004  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2005  char aresample_swr_opts[512] = "";
2006  AVDictionaryEntry *e = NULL;
2007  char asrc_args[256];
2008  int ret;
2009 
2010  avfilter_graph_free(&is->agraph);
2011  if (!(is->agraph = avfilter_graph_alloc()))
2012  return AVERROR(ENOMEM);
2013 
2014  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2015  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2016  if (strlen(aresample_swr_opts))
2017  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2018  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2019 
2020  ret = snprintf(asrc_args, sizeof(asrc_args),
2021  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2022  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2023  is->audio_filter_src.channels,
2024  1, is->audio_filter_src.freq);
2025  if (is->audio_filter_src.channel_layout)
2026  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2027  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2028 
2029  ret = avfilter_graph_create_filter(&filt_asrc,
2030  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2031  asrc_args, NULL, is->agraph);
2032  if (ret < 0)
2033  goto end;
2034 
2035 
2036  ret = avfilter_graph_create_filter(&filt_asink,
2037  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2038  NULL, NULL, is->agraph);
2039  if (ret < 0)
2040  goto end;
2041 
2042  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2043  goto end;
2044  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2045  goto end;
2046 
2047  if (force_output_format) {
2048  channel_layouts[0] = is->audio_tgt.channel_layout;
2049  channels [0] = is->audio_tgt.channels;
2050  sample_rates [0] = is->audio_tgt.freq;
2051  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2052  goto end;
2053  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2054  goto end;
2055  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2056  goto end;
2057  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2058  goto end;
2059  }
2060 
2061 
2062  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2063  goto end;
2064 
2065  is->in_audio_filter = filt_asrc;
2066  is->out_audio_filter = filt_asink;
2067 
2068 end:
2069  if (ret < 0)
2070  avfilter_graph_free(&is->agraph);
2071  return ret;
2072 }
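The buffer source above is configured entirely through a formatted argument string ("sample_rate=..:sample_fmt=..:channels=..:time_base=1/<rate>"), and the aresample options are joined into a single "key=value:key=value" string with the trailing ':' stripped. A minimal standalone sketch of that string building (plain C; the option names and values are illustrative, not taken from ffplay):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* join option pairs as "key=value:key=value", then drop the trailing ':' */
        const char *keys[]   = { "min_comp", "min_hard_comp" };   /* illustrative names */
        const char *values[] = { "0.01",     "0.1" };
        char opts[512] = "";
        for (int i = 0; i < 2; i++) {
            size_t len = strlen(opts);
            snprintf(opts + len, sizeof(opts) - len, "%s=%s:", keys[i], values[i]);
        }
        if (strlen(opts))
            opts[strlen(opts) - 1] = '\0';

        /* format the buffer-source arguments the same way the function above does */
        char asrc_args[256];
        int freq = 48000, channels = 2;
        snprintf(asrc_args, sizeof(asrc_args),
                 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                 freq, "s16", channels, 1, freq);

        printf("%s\n%s\n", opts, asrc_args);
        return 0;
    }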
2073 #endif /* CONFIG_AVFILTER */
2074 
2075 static int audio_thread(void *arg)
2076 {
2077  VideoState *is = arg;
2078  AVFrame *frame = av_frame_alloc();
2079  Frame *af;
2080 #if CONFIG_AVFILTER
2081  int last_serial = -1;
2082  int64_t dec_channel_layout;
2083  int reconfigure;
2084 #endif
2085  int got_frame = 0;
2086  AVRational tb;
2087  int ret = 0;
2088 
2089  if (!frame)
2090  return AVERROR(ENOMEM);
2091 
2092  do {
2093  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2094  goto the_end;
2095 
2096  if (got_frame) {
2097  tb = (AVRational){1, frame->sample_rate};
2098 
2099 #if CONFIG_AVFILTER
2100  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2101 
2102  reconfigure =
2103  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2104  frame->format, av_frame_get_channels(frame)) ||
2105  is->audio_filter_src.channel_layout != dec_channel_layout ||
2106  is->audio_filter_src.freq != frame->sample_rate ||
2107  is->auddec.pkt_serial != last_serial;
2108 
2109  if (reconfigure) {
2110  char buf1[1024], buf2[1024];
2111  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2112  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2114  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2115  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2117 
2118  is->audio_filter_src.fmt = frame->format;
2119  is->audio_filter_src.channels = av_frame_get_channels(frame);
2120  is->audio_filter_src.channel_layout = dec_channel_layout;
2121  is->audio_filter_src.freq = frame->sample_rate;
2122  last_serial = is->auddec.pkt_serial;
2123 
2124  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2125  goto the_end;
2126  }
2127 
2128  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2129  goto the_end;
2130 
2131  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2132  tb = is->out_audio_filter->inputs[0]->time_base;
2133 #endif
2134  if (!(af = frame_queue_peek_writable(&is->sampq)))
2135  goto the_end;
2136 
2137  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2138  af->pos = av_frame_get_pkt_pos(frame);
2139  af->serial = is->auddec.pkt_serial;
2140  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2141 
2142  av_frame_move_ref(af->frame, frame);
2143  frame_queue_push(&is->sampq);
2144 
2145 #if CONFIG_AVFILTER
2146  if (is->audioq.serial != is->auddec.pkt_serial)
2147  break;
2148  }
2149  if (ret == AVERROR_EOF)
2150  is->auddec.finished = is->auddec.pkt_serial;
2151 #endif
2152  }
2153  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2154  the_end:
2155 #if CONFIG_AVFILTER
2156  avfilter_graph_free(&is->agraph);
2157 #endif
2158  av_frame_free(&frame);
2159  return ret;
2160 }
2161 
2162 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2163 {
2164  packet_queue_start(d->queue);
2165  d->decoder_tid = SDL_CreateThread(fn, arg);
2166  if (!d->decoder_tid) {
2167  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2168  return AVERROR(ENOMEM);
2169  }
2170  return 0;
2171 }
2172 
2173 static int video_thread(void *arg)
2174 {
2175  VideoState *is = arg;
2176  AVFrame *frame = av_frame_alloc();
2177  double pts;
2178  double duration;
2179  int ret;
2180  AVRational tb = is->video_st->time_base;
2181  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2182 
2183 #if CONFIG_AVFILTER
2184  AVFilterGraph *graph = avfilter_graph_alloc();
2185  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2186  int last_w = 0;
2187  int last_h = 0;
2188  enum AVPixelFormat last_format = -2;
2189  int last_serial = -1;
2190  int last_vfilter_idx = 0;
2191  if (!graph) {
2192  av_frame_free(&frame);
2193  return AVERROR(ENOMEM);
2194  }
2195 
2196 #endif
2197 
2198  if (!frame) {
2199 #if CONFIG_AVFILTER
2200  avfilter_graph_free(&graph);
2201 #endif
2202  return AVERROR(ENOMEM);
2203  }
2204 
2205  for (;;) {
2206  ret = get_video_frame(is, frame);
2207  if (ret < 0)
2208  goto the_end;
2209  if (!ret)
2210  continue;
2211 
2212 #if CONFIG_AVFILTER
2213  if ( last_w != frame->width
2214  || last_h != frame->height
2215  || last_format != frame->format
2216  || last_serial != is->viddec.pkt_serial
2217  || last_vfilter_idx != is->vfilter_idx) {
2219  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2220  last_w, last_h,
2221  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2222  frame->width, frame->height,
2223  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2224  avfilter_graph_free(&graph);
2225  graph = avfilter_graph_alloc();
2226  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2227  SDL_Event event;
2228  event.type = FF_QUIT_EVENT;
2229  event.user.data1 = is;
2230  SDL_PushEvent(&event);
2231  goto the_end;
2232  }
2233  filt_in = is->in_video_filter;
2234  filt_out = is->out_video_filter;
2235  last_w = frame->width;
2236  last_h = frame->height;
2237  last_format = frame->format;
2238  last_serial = is->viddec.pkt_serial;
2239  last_vfilter_idx = is->vfilter_idx;
2240  frame_rate = filt_out->inputs[0]->frame_rate;
2241  }
2242 
2243  ret = av_buffersrc_add_frame(filt_in, frame);
2244  if (ret < 0)
2245  goto the_end;
2246 
2247  while (ret >= 0) {
2248  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2249 
2250  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2251  if (ret < 0) {
2252  if (ret == AVERROR_EOF)
2253  is->viddec.finished = is->viddec.pkt_serial;
2254  ret = 0;
2255  break;
2256  }
2257 
2258  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2259  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2260  is->frame_last_filter_delay = 0;
2261  tb = filt_out->inputs[0]->time_base;
2262 #endif
2263  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2264  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2265  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2266  av_frame_unref(frame);
2267 #if CONFIG_AVFILTER
2268  }
2269 #endif
2270 
2271  if (ret < 0)
2272  goto the_end;
2273  }
2274  the_end:
2275 #if CONFIG_AVFILTER
2276  avfilter_graph_free(&graph);
2277 #endif
2278  av_frame_free(&frame);
2279  return 0;
2280 }
2281 
2282 static int subtitle_thread(void *arg)
2283 {
2284  VideoState *is = arg;
2285  Frame *sp;
2286  int got_subtitle;
2287  double pts;
2288  int i;
2289 
2290  for (;;) {
2291  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2292  return 0;
2293 
2294  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2295  break;
2296 
2297  pts = 0;
2298 
2299  if (got_subtitle && sp->sub.format == 0) {
2300  if (sp->sub.pts != AV_NOPTS_VALUE)
2301  pts = sp->sub.pts / (double)AV_TIME_BASE;
2302  sp->pts = pts;
2303  sp->serial = is->subdec.pkt_serial;
2304  if (!(sp->subrects = av_mallocz_array(sp->sub.num_rects, sizeof(AVSubtitleRect*)))) {
2305  av_log(NULL, AV_LOG_FATAL, "Cannot allocate subrects\n");
2306  exit(1);
2307  }
2308 
2309  for (i = 0; i < sp->sub.num_rects; i++)
2310  {
2311  int in_w = sp->sub.rects[i]->w;
2312  int in_h = sp->sub.rects[i]->h;
2313  int subw = is->subdec.avctx->width ? is->subdec.avctx->width : is->viddec_width;
2314  int subh = is->subdec.avctx->height ? is->subdec.avctx->height : is->viddec_height;
2315  int out_w = is->viddec_width ? in_w * is->viddec_width / subw : in_w;
2316  int out_h = is->viddec_height ? in_h * is->viddec_height / subh : in_h;
2317 
2318  if (!(sp->subrects[i] = av_mallocz(sizeof(AVSubtitleRect))) ||
2319  av_image_alloc(sp->subrects[i]->data, sp->subrects[i]->linesize, out_w, out_h, AV_PIX_FMT_YUVA420P, 16) < 0) {
2320  av_log(NULL, AV_LOG_FATAL, "Cannot allocate subtitle data\n");
2321  exit(1);
2322  }
2323 
2324  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
2325  in_w, in_h, AV_PIX_FMT_PAL8, out_w, out_h,
2326  AV_PIX_FMT_YUVA420P, SWS_BICUBIC, NULL, NULL, NULL);
2327  if (!is->sub_convert_ctx) {
2328  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the sub conversion context\n");
2329  exit(1);
2330  }
2331  sws_scale(is->sub_convert_ctx,
2332  (void*)sp->sub.rects[i]->data, sp->sub.rects[i]->linesize,
2333  0, in_h, sp->subrects[i]->data, sp->subrects[i]->linesize);
2334 
2335  sp->subrects[i]->w = out_w;
2336  sp->subrects[i]->h = out_h;
2337  sp->subrects[i]->x = sp->sub.rects[i]->x * out_w / in_w;
2338  sp->subrects[i]->y = sp->sub.rects[i]->y * out_h / in_h;
2339  }
2340 
2341  /* now we can update the picture count */
2342  frame_queue_push(&is->subpq);
2343  } else if (got_subtitle) {
2344  avsubtitle_free(&sp->sub);
2345  }
2346  }
2347  return 0;
2348 }
2349 
2350 /* copy samples for viewing in editor window */
2351 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2352 {
2353  int size, len;
2354 
2355  size = samples_size / sizeof(short);
2356  while (size > 0) {
2357  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2358  if (len > size)
2359  len = size;
2360  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2361  samples += len;
2362  is->sample_array_index += len;
2363  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2364  is->sample_array_index = 0;
2365  size -= len;
2366  }
2367 }
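update_sample_display() simply appends the decoded samples into a fixed-size circular array, wrapping the write index back to zero at the end; the waveform/RDFT display later reads from that array. A self-contained sketch of the same wrap-around copy (plain C; SAMPLE_ARRAY_SIZE and the static buffer are stand-ins for the VideoState fields):

    #include <string.h>

    #define SAMPLE_ARRAY_SIZE (8 * 65536)   /* illustrative ring size */

    static short sample_array[SAMPLE_ARRAY_SIZE];
    static int   sample_array_index;

    /* copy 'count' 16-bit samples into the ring buffer, wrapping at the end */
    static void push_samples(const short *samples, int count)
    {
        while (count > 0) {
            int len = SAMPLE_ARRAY_SIZE - sample_array_index;
            if (len > count)
                len = count;
            memcpy(sample_array + sample_array_index, samples, len * sizeof(short));
            samples += len;
            sample_array_index += len;
            if (sample_array_index >= SAMPLE_ARRAY_SIZE)
                sample_array_index = 0;
            count -= len;
        }
    }

    int main(void)
    {
        short chunk[1024] = { 0 };
        for (int i = 0; i < 600; i++)      /* push enough data to wrap around once */
            push_samples(chunk, 1024);
        return 0;
    }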
2368 
2369 /* return the wanted number of samples to get better sync if sync_type is video
2370  * or external master clock */
2371 static int synchronize_audio(VideoState *is, int nb_samples)
2372 {
2373  int wanted_nb_samples = nb_samples;
2374 
2375  /* if not master, then we try to remove or add samples to correct the clock */
2376  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2377  double diff, avg_diff;
2378  int min_nb_samples, max_nb_samples;
2379 
2380  diff = get_clock(&is->audclk) - get_master_clock(is);
2381 
2382  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2383  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2384  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2385  /* not enough measures to have a correct estimate */
2386  is->audio_diff_avg_count++;
2387  } else {
2388  /* estimate the A-V difference */
2389  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2390 
2391  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2392  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2393  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2394  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2395  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2396  }
2397  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2398  diff, avg_diff, wanted_nb_samples - nb_samples,
2400  }
2401  } else {
2402  /* difference is too large: may be initial PTS errors, so
2403  reset the A-V filter */
2404  is->audio_diff_avg_count = 0;
2405  is->audio_diff_cum = 0;
2406  }
2407  }
2408 
2409  return wanted_nb_samples;
2410 }
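When audio is not the master clock, the correction works on an exponentially weighted average of the A-V difference: each new diff is folded in with coefficient exp(log(0.01)/AUDIO_DIFF_AVG_NB), so a measurement's weight decays to about 1% after AUDIO_DIFF_AVG_NB further updates. Once the average exceeds the threshold (roughly one hardware buffer's worth of time), the wanted sample count is nudged by diff*freq and clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent. A standalone sketch of just that arithmetic (plain C; the diff and threshold values are illustrative):

    #include <math.h>
    #include <stdio.h>

    #define AUDIO_DIFF_AVG_NB             20
    #define SAMPLE_CORRECTION_PERCENT_MAX 10

    static int clip_int(int v, int lo, int hi) { return v < lo ? lo : v > hi ? hi : v; }

    int main(void)
    {
        double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);  /* ~0.794 */
        double cum = 0.0;
        int nb_samples = 1024, freq = 48000;
        double diff = 0.015;        /* pretend audio is 15 ms ahead of the master clock */
        double threshold = 0.010;   /* pretend the hardware buffer holds ~10 ms */

        for (int i = 0; i < AUDIO_DIFF_AVG_NB; i++)
            cum = diff + coef * cum;             /* running EWMA accumulator */
        double avg_diff = cum * (1.0 - coef);

        int wanted = nb_samples;
        if (fabs(avg_diff) >= threshold) {
            wanted = nb_samples + (int)(diff * freq);
            wanted = clip_int(wanted,
                              nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100,
                              nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100);
        }
        printf("avg_diff=%f wanted=%d (was %d)\n", avg_diff, wanted, nb_samples);
        return 0;
    }

With the numbers above the raw request would be 1024 + 720 samples, but the clamp limits it to 1126 (+10%), so the stream drifts back into sync over several frames instead of jumping.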
2411 
2412 /**
2413  * Decode one audio frame and return its uncompressed size.
2414  *
2415  * The processed audio frame is decoded, converted if required, and
2416  * stored in is->audio_buf, with size in bytes given by the return
2417  * value.
2418  */
2419 static int audio_decode_frame(VideoState *is)
2420 {
2421  int data_size, resampled_data_size;
2422  int64_t dec_channel_layout;
2423  av_unused double audio_clock0;
2424  int wanted_nb_samples;
2425  Frame *af;
2426 
2427  if (is->paused)
2428  return -1;
2429 
2430  do {
2431 #if defined(_WIN32)
2432  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2434  return -1;
2435  av_usleep (1000);
2436  }
2437 #endif
2438  if (!(af = frame_queue_peek_readable(&is->sampq)))
2439  return -1;
2440  frame_queue_next(&is->sampq);
2441  } while (af->serial != is->audioq.serial);
2442 
2444  af->frame->nb_samples,
2445  af->frame->format, 1);
2446 
2447  dec_channel_layout =
2450  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2451 
2452  if (af->frame->format != is->audio_src.fmt ||
2453  dec_channel_layout != is->audio_src.channel_layout ||
2454  af->frame->sample_rate != is->audio_src.freq ||
2455  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2456  swr_free(&is->swr_ctx);
2459  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2460  0, NULL);
2461  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2463  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2466  swr_free(&is->swr_ctx);
2467  return -1;
2468  }
2469  is->audio_src.channel_layout = dec_channel_layout;
2470  is->audio_src.channels = av_frame_get_channels(af->frame);
2471  is->audio_src.freq = af->frame->sample_rate;
2472  is->audio_src.fmt = af->frame->format;
2473  }
2474 
2475  if (is->swr_ctx) {
2476  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2477  uint8_t **out = &is->audio_buf1;
2478  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2479  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2480  int len2;
2481  if (out_size < 0) {
2482  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2483  return -1;
2484  }
2485  if (wanted_nb_samples != af->frame->nb_samples) {
2486  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2487  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2488  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2489  return -1;
2490  }
2491  }
2492  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2493  if (!is->audio_buf1)
2494  return AVERROR(ENOMEM);
2495  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2496  if (len2 < 0) {
2497  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2498  return -1;
2499  }
2500  if (len2 == out_count) {
2501  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2502  if (swr_init(is->swr_ctx) < 0)
2503  swr_free(&is->swr_ctx);
2504  }
2505  is->audio_buf = is->audio_buf1;
2506  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2507  } else {
2508  is->audio_buf = af->frame->data[0];
2509  resampled_data_size = data_size;
2510  }
2511 
2512  audio_clock0 = is->audio_clock;
2513  /* update the audio clock with the pts */
2514  if (!isnan(af->pts))
2515  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2516  else
2517  is->audio_clock = NAN;
2518  is->audio_clock_serial = af->serial;
2519 #ifdef DEBUG
2520  {
2521  static double last_clock;
2522  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2523  is->audio_clock - last_clock,
2524  is->audio_clock, audio_clock0);
2525  last_clock = is->audio_clock;
2526  }
2527 #endif
2528  return resampled_data_size;
2529 }
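The resampling path above sizes its output conservatively: it asks for wanted_nb_samples rescaled to the target rate plus 256 samples of headroom, and the byte count of the converted data is simply samples * channels * bytes-per-sample of the target format. A tiny sketch of that sizing arithmetic (plain C, illustrative numbers; it ignores the alignment handled by av_samples_get_buffer_size(), and 2 bytes/sample corresponds to the S16 output ffplay uses):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int wanted_nb_samples = 1126;   /* after synchronize_audio() */
        int in_rate  = 44100;
        int out_rate = 48000;
        int out_channels = 2;
        int bytes_per_sample = 2;       /* AV_SAMPLE_FMT_S16 */

        /* worst-case output sample count, with the same +256 headroom as above */
        int out_count = (int)((int64_t)wanted_nb_samples * out_rate / in_rate + 256);
        int out_size  = out_count * out_channels * bytes_per_sample;

        /* suppose the converter actually produced len2 samples per channel */
        int len2 = 1220;
        int resampled_data_size = len2 * out_channels * bytes_per_sample;

        printf("allocate %d bytes, used %d bytes\n", out_size, resampled_data_size);
        return 0;
    }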
2530 
2531 /* prepare a new audio buffer */
2532 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2533 {
2534  VideoState *is = opaque;
2535  int audio_size, len1;
2536 
2538 
2539  while (len > 0) {
2540  if (is->audio_buf_index >= is->audio_buf_size) {
2541  audio_size = audio_decode_frame(is);
2542  if (audio_size < 0) {
2543  /* if error, just output silence */
2544  is->audio_buf = is->silence_buf;
2545  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2546  } else {
2547  if (is->show_mode != SHOW_MODE_VIDEO)
2548  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2549  is->audio_buf_size = audio_size;
2550  }
2551  is->audio_buf_index = 0;
2552  }
2553  len1 = is->audio_buf_size - is->audio_buf_index;
2554  if (len1 > len)
2555  len1 = len;
2556  if (!is->muted && is->audio_volume == SDL_MIX_MAXVOLUME)
2557  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2558  else {
2559  memset(stream, is->silence_buf[0], len1);
2560  if (!is->muted)
2561  SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
2562  }
2563  len -= len1;
2564  stream += len1;
2565  is->audio_buf_index += len1;
2566  }
2568  /* Let's assume the audio driver that is used by SDL has two periods. */
2569  if (!isnan(is->audio_clock)) {
2571  sync_clock_to_slave(&is->extclk, &is->audclk);
2572  }
2573 }
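The "two periods" comment refers to how the audio clock is estimated at the end of the callback: the sample actually being played lags the last decoded pts by roughly two hardware buffers plus whatever part of audio_buf has not been copied out yet, converted to seconds via bytes_per_sec. A sketch of just that latency arithmetic (plain C, illustrative values):

    #include <stdio.h>

    int main(void)
    {
        /* illustrative values: 48 kHz stereo S16 */
        int bytes_per_sec     = 48000 * 2 * 2;
        int audio_hw_buf_size = 8192;    /* bytes handed to SDL per callback */
        int audio_buf_size    = 16384;   /* bytes produced by the last decode */
        int audio_buf_index   = 4096;    /* bytes already copied out */

        int audio_write_buf_size = audio_buf_size - audio_buf_index;
        double audio_clock = 12.345;     /* pts of the end of the decoded frame */

        /* assume the driver buffers two periods: pts of the sample being played now */
        double playing_pts = audio_clock -
            (double)(2 * audio_hw_buf_size + audio_write_buf_size) / bytes_per_sec;

        printf("estimated playing pts: %f\n", playing_pts);
        return 0;
    }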
2574 
2575 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2576 {
2577  SDL_AudioSpec wanted_spec, spec;
2578  const char *env;
2579  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2580  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2581  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2582 
2583  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2584  if (env) {
2585  wanted_nb_channels = atoi(env);
2586  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2587  }
2588  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2589  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2590  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2591  }
2592  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2593  wanted_spec.channels = wanted_nb_channels;
2594  wanted_spec.freq = wanted_sample_rate;
2595  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2596  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2597  return -1;
2598  }
2599  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2600  next_sample_rate_idx--;
2601  wanted_spec.format = AUDIO_S16SYS;
2602  wanted_spec.silence = 0;
2603  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2604  wanted_spec.callback = sdl_audio_callback;
2605  wanted_spec.userdata = opaque;
2606  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2607  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2608  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2609  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2610  if (!wanted_spec.channels) {
2611  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2612  wanted_spec.channels = wanted_nb_channels;
2613  if (!wanted_spec.freq) {
2615  "No more combinations to try, audio open failed\n");
2616  return -1;
2617  }
2618  }
2619  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2620  }
2621  if (spec.format != AUDIO_S16SYS) {
2623  "SDL advised audio format %d is not supported!\n", spec.format);
2624  return -1;
2625  }
2626  if (spec.channels != wanted_spec.channels) {
2627  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2628  if (!wanted_channel_layout) {
2630  "SDL advised channel count %d is not supported!\n", spec.channels);
2631  return -1;
2632  }
2633  }
2634 
2635  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2636  audio_hw_params->freq = spec.freq;
2637  audio_hw_params->channel_layout = wanted_channel_layout;
2638  audio_hw_params->channels = spec.channels;
2639  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2640  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2641  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2642  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2643  return -1;
2644  }
2645  return spec.size;
2646 }
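The wanted_spec.samples request above keeps SDL callbacks reasonably infrequent: it takes freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC samples, and 2 << av_log2(x) turns that into the smallest power of two strictly greater than x, never going below SDL_AUDIO_MIN_BUFFER_SIZE. A standalone sketch of the same calculation (plain C; floor_log2() is a stand-in for av_log2()):

    #include <stdio.h>

    #define SDL_AUDIO_MIN_BUFFER_SIZE       512
    #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30

    /* floor(log2(v)) for v >= 1, standing in for av_log2() */
    static int floor_log2(unsigned v)
    {
        int n = 0;
        while (v >>= 1)
            n++;
        return n;
    }

    int main(void)
    {
        int rates[] = { 8000, 44100, 48000, 192000 };
        for (int i = 0; i < 4; i++) {
            int freq = rates[i];
            int samples = 2 << floor_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC);
            if (samples < SDL_AUDIO_MIN_BUFFER_SIZE)
                samples = SDL_AUDIO_MIN_BUFFER_SIZE;
            printf("%6d Hz -> %5d samples/callback (~%.1f callbacks/s)\n",
                   freq, samples, (double)freq / samples);
        }
        return 0;
    }

For 48 kHz this picks 2048 samples per callback, i.e. about 23 callbacks per second, comfortably under the 30/s target while staying a power of two.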
2647 
2648 /* open a given stream. Return 0 if OK */
2649 static int stream_component_open(VideoState *is, int stream_index)
2650 {
2651  AVFormatContext *ic = is->ic;
2652  AVCodecContext *avctx;
2653  AVCodec *codec;
2654  const char *forced_codec_name = NULL;
2655  AVDictionary *opts;
2656  AVDictionaryEntry *t = NULL;
2657  int sample_rate, nb_channels;
2658  int64_t channel_layout;
2659  int ret = 0;
2660  int stream_lowres = lowres;
2661 
2662  if (stream_index < 0 || stream_index >= ic->nb_streams)
2663  return -1;
2664  avctx = ic->streams[stream_index]->codec;
2665 
2666  codec = avcodec_find_decoder(avctx->codec_id);
2667 
2668  switch(avctx->codec_type){
2669  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2670  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2671  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2672  }
2673  if (forced_codec_name)
2674  codec = avcodec_find_decoder_by_name(forced_codec_name);
2675  if (!codec) {
2676  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2677  "No codec could be found with name '%s'\n", forced_codec_name);
2678  else av_log(NULL, AV_LOG_WARNING,
2679  "No codec could be found with id %d\n", avctx->codec_id);
2680  return -1;
2681  }
2682 
2683  avctx->codec_id = codec->id;
2684  if(stream_lowres > av_codec_get_max_lowres(codec)){
2685  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2686  av_codec_get_max_lowres(codec));
2687  stream_lowres = av_codec_get_max_lowres(codec);
2688  }
2689  av_codec_set_lowres(avctx, stream_lowres);
2690 
2691 #if FF_API_EMU_EDGE
2692  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2693 #endif
2694  if (fast)
2695  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2696 #if FF_API_EMU_EDGE
2697  if(codec->capabilities & AV_CODEC_CAP_DR1)
2698  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2699 #endif
2700 
2701  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2702  if (!av_dict_get(opts, "threads", NULL, 0))
2703  av_dict_set(&opts, "threads", "auto", 0);
2704  if (stream_lowres)
2705  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2706  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2707  av_dict_set(&opts, "refcounted_frames", "1", 0);
2708  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2709  goto fail;
2710  }
2711  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2712  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2713  ret = AVERROR_OPTION_NOT_FOUND;
2714  goto fail;
2715  }
2716 
2717  is->eof = 0;
2718  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2719  switch (avctx->codec_type) {
2720  case AVMEDIA_TYPE_AUDIO:
2721 #if CONFIG_AVFILTER
2722  {
2723  AVFilterLink *link;
2724 
2725  is->audio_filter_src.freq = avctx->sample_rate;
2726  is->audio_filter_src.channels = avctx->channels;
2727  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2728  is->audio_filter_src.fmt = avctx->sample_fmt;
2729  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2730  goto fail;
2731  link = is->out_audio_filter->inputs[0];
2732  sample_rate = link->sample_rate;
2733  nb_channels = link->channels;
2734  channel_layout = link->channel_layout;
2735  }
2736 #else
2737  sample_rate = avctx->sample_rate;
2738  nb_channels = avctx->channels;
2739  channel_layout = avctx->channel_layout;
2740 #endif
2741 
2742  /* prepare audio output */
2743  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2744  goto fail;
2745  is->audio_hw_buf_size = ret;
2746  is->audio_src = is->audio_tgt;
2747  is->audio_buf_size = 0;
2748  is->audio_buf_index = 0;
2749 
2750  /* init averaging filter */
2751  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2752  is->audio_diff_avg_count = 0;
2753  /* since we do not have a precise enough audio FIFO fullness,
2754  we correct audio sync only if larger than this threshold */
2755  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2756 
2757  is->audio_stream = stream_index;
2758  is->audio_st = ic->streams[stream_index];
2759 
2760  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2761  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2762  is->auddec.start_pts = is->audio_st->start_time;
2763  is->auddec.start_pts_tb = is->audio_st->time_base;
2764  }
2765  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2766  goto fail;
2767  SDL_PauseAudio(0);
2768  break;
2769  case AVMEDIA_TYPE_VIDEO:
2770  is->video_stream = stream_index;
2771  is->video_st = ic->streams[stream_index];
2772 
2773  is->viddec_width = avctx->width;
2774  is->viddec_height = avctx->height;
2775 
2776  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2777  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2778  goto fail;
2779  is->queue_attachments_req = 1;
2780  break;
2781  case AVMEDIA_TYPE_SUBTITLE:
2782  is->subtitle_stream = stream_index;
2783  is->subtitle_st = ic->streams[stream_index];
2784 
2785  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2786  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2787  goto fail;
2788  break;
2789  default:
2790  break;
2791  }
2792 
2793 fail:
2794  av_dict_free(&opts);
2795 
2796  return ret;
2797 }
2798 
2799 static int decode_interrupt_cb(void *ctx)
2800 {
2801  VideoState *is = ctx;
2802  return is->abort_request;
2803 }
2804 
2805 static int is_realtime(AVFormatContext *s)
2806 {
2807  if( !strcmp(s->iformat->name, "rtp")
2808  || !strcmp(s->iformat->name, "rtsp")
2809  || !strcmp(s->iformat->name, "sdp")
2810  )
2811  return 1;
2812 
2813  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2814  || !strncmp(s->filename, "udp:", 4)
2815  )
2816  )
2817  return 1;
2818  return 0;
2819 }
2820 
2821 /* this thread gets the stream from the disk or the network */
2822 static int read_thread(void *arg)
2823 {
2824  VideoState *is = arg;
2825  AVFormatContext *ic = NULL;
2826  int err, i, ret;
2827  int st_index[AVMEDIA_TYPE_NB];
2828  AVPacket pkt1, *pkt = &pkt1;
2829  int64_t stream_start_time;
2830  int pkt_in_play_range = 0;
2831  AVDictionaryEntry *t;
2832  AVDictionary **opts;
2833  int orig_nb_streams;
2834  SDL_mutex *wait_mutex = SDL_CreateMutex();
2835  int scan_all_pmts_set = 0;
2836  int64_t pkt_ts;
2837 
2838  if (!wait_mutex) {
2839  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2840  ret = AVERROR(ENOMEM);
2841  goto fail;
2842  }
2843 
2844  memset(st_index, -1, sizeof(st_index));
2845  is->last_video_stream = is->video_stream = -1;
2846  is->last_audio_stream = is->audio_stream = -1;
2847  is->last_subtitle_stream = is->subtitle_stream = -1;
2848  is->eof = 0;
2849 
2850  ic = avformat_alloc_context();
2851  if (!ic) {
2852  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2853  ret = AVERROR(ENOMEM);
2854  goto fail;
2855  }
2856  ic->interrupt_callback.callback = decode_interrupt_cb;
2857  ic->interrupt_callback.opaque = is;
2858  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2859  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2860  scan_all_pmts_set = 1;
2861  }
2862  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2863  if (err < 0) {
2864  print_error(is->filename, err);
2865  ret = -1;
2866  goto fail;
2867  }
2868  if (scan_all_pmts_set)
2869  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2870 
2872  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2874  goto fail;
2875  }
2876  is->ic = ic;
2877 
2878  if (genpts)
2879  ic->flags |= AVFMT_FLAG_GENPTS;
2880 
2882 
2884  orig_nb_streams = ic->nb_streams;
2885 
2886  err = avformat_find_stream_info(ic, opts);
2887 
2888  for (i = 0; i < orig_nb_streams; i++)
2889  av_dict_free(&opts[i]);
2890  av_freep(&opts);
2891 
2892  if (err < 0) {
2894  "%s: could not find codec parameters\n", is->filename);
2895  ret = -1;
2896  goto fail;
2897  }
2898 
2899  if (ic->pb)
2900  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2901 
2902  if (seek_by_bytes < 0)
2903  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2904 
2905  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2906 
2907  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2908  window_title = av_asprintf("%s - %s", t->value, input_filename);
2909 
2910  /* if seeking requested, we execute it */
2911  if (start_time != AV_NOPTS_VALUE) {
2912  int64_t timestamp;
2913 
2914  timestamp = start_time;
2915  /* add the stream start time */
2916  if (ic->start_time != AV_NOPTS_VALUE)
2917  timestamp += ic->start_time;
2918  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2919  if (ret < 0) {
2920  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2921  is->filename, (double)timestamp / AV_TIME_BASE);
2922  }
2923  }
2924 
2925  is->realtime = is_realtime(ic);
2926 
2927  if (show_status)
2928  av_dump_format(ic, 0, is->filename, 0);
2929 
2930  for (i = 0; i < ic->nb_streams; i++) {
2931  AVStream *st = ic->streams[i];
2932  enum AVMediaType type = st->codec->codec_type;
2933  st->discard = AVDISCARD_ALL;
2934  if (wanted_stream_spec[type] && st_index[type] == -1)
2935  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2936  st_index[type] = i;
2937  }
2938  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2939  if (wanted_stream_spec[i] && st_index[i] == -1) {
2940  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2941  st_index[i] = INT_MAX;
2942  }
2943  }
2944 
2945  if (!video_disable)
2946  st_index[AVMEDIA_TYPE_VIDEO] =
2948  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2949  if (!audio_disable)
2950  st_index[AVMEDIA_TYPE_AUDIO] =
2952  st_index[AVMEDIA_TYPE_AUDIO],
2953  st_index[AVMEDIA_TYPE_VIDEO],
2954  NULL, 0);
2956  st_index[AVMEDIA_TYPE_SUBTITLE] =
2958  st_index[AVMEDIA_TYPE_SUBTITLE],
2959  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2960  st_index[AVMEDIA_TYPE_AUDIO] :
2961  st_index[AVMEDIA_TYPE_VIDEO]),
2962  NULL, 0);
2963 
2964  is->show_mode = show_mode;
2965  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2966  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2967  AVCodecContext *avctx = st->codec;
2968  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2969  if (avctx->width)
2970  set_default_window_size(avctx->width, avctx->height, sar);
2971  }
2972 
2973  /* open the streams */
2974  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2975  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2976  }
2977 
2978  ret = -1;
2979  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2980  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2981  }
2982  if (is->show_mode == SHOW_MODE_NONE)
2983  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2984 
2985  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2986  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2987  }
2988 
2989  if (is->video_stream < 0 && is->audio_stream < 0) {
2990  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2991  is->filename);
2992  ret = -1;
2993  goto fail;
2994  }
2995 
2996  if (infinite_buffer < 0 && is->realtime)
2997  infinite_buffer = 1;
2998 
2999  for (;;) {
3000  if (is->abort_request)
3001  break;
3002  if (is->paused != is->last_paused) {
3003  is->last_paused = is->paused;
3004  if (is->paused)
3005  is->read_pause_return = av_read_pause(ic);
3006  else
3007  av_read_play(ic);
3008  }
3009 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3010  if (is->paused &&
3011  (!strcmp(ic->iformat->name, "rtsp") ||
3012  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3013  /* wait 10 ms to avoid trying to get another packet */
3014  /* XXX: horrible */
3015  SDL_Delay(10);
3016  continue;
3017  }
3018 #endif
3019  if (is->seek_req) {
3020  int64_t seek_target = is->seek_pos;
3021  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3022  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3023 // FIXME the +-2 is due to rounding not being done in the correct direction in generation
3024 // of the seek_pos/seek_rel variables
3025 
3026  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3027  if (ret < 0) {
3029  "%s: error while seeking\n", is->ic->filename);
3030  } else {
3031  if (is->audio_stream >= 0) {
3032  packet_queue_flush(&is->audioq);
3033  packet_queue_put(&is->audioq, &flush_pkt);
3034  }
3035  if (is->subtitle_stream >= 0) {
3036  packet_queue_flush(&is->subtitleq);
3037  packet_queue_put(&is->subtitleq, &flush_pkt);
3038  }
3039  if (is->video_stream >= 0) {
3040  packet_queue_flush(&is->videoq);
3041  packet_queue_put(&is->videoq, &flush_pkt);
3042  }
3043  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3044  set_clock(&is->extclk, NAN, 0);
3045  } else {
3046  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3047  }
3048  }
3049  is->seek_req = 0;
3050  is->queue_attachments_req = 1;
3051  is->eof = 0;
3052  if (is->paused)
3053  step_to_next_frame(is);
3054  }
3055  if (is->queue_attachments_req) {
3056  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3057  AVPacket copy;
3058  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3059  goto fail;
3060  packet_queue_put(&is->videoq, &copy);
3061  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3062  }
3063  is->queue_attachments_req = 0;
3064  }
3065 
3066  /* if the queues are full, no need to read more */
3067  if (infinite_buffer<1 &&
3068  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3069  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3070  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3072  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3073  /* wait 10 ms */
3074  SDL_LockMutex(wait_mutex);
3075  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3076  SDL_UnlockMutex(wait_mutex);
3077  continue;
3078  }
3079  if (!is->paused &&
3080  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3081  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3082  if (loop != 1 && (!loop || --loop)) {
3083  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3084  } else if (autoexit) {
3085  ret = AVERROR_EOF;
3086  goto fail;
3087  }
3088  }
3089  ret = av_read_frame(ic, pkt);
3090  if (ret < 0) {
3091  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3092  if (is->video_stream >= 0)
3093  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3094  if (is->audio_stream >= 0)
3095  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3096  if (is->subtitle_stream >= 0)
3097  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3098  is->eof = 1;
3099  }
3100  if (ic->pb && ic->pb->error)
3101  break;
3102  SDL_LockMutex(wait_mutex);
3103  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3104  SDL_UnlockMutex(wait_mutex);
3105  continue;
3106  } else {
3107  is->eof = 0;
3108  }
3109  /* check if packet is in play range specified by user, then queue, otherwise discard */
3110  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3111  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3112  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3113  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3114  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3115  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3116  <= ((double)duration / 1000000);
3117  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3118  packet_queue_put(&is->audioq, pkt);
3119  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3120  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3121  packet_queue_put(&is->videoq, pkt);
3122  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3123  packet_queue_put(&is->subtitleq, pkt);
3124  } else {
3125  av_packet_unref(pkt);
3126  }
3127  }
3128 
3129  ret = 0;
3130  fail:
3131  if (ic && !is->ic)
3132  avformat_close_input(&ic);
3133 
3134  if (ret != 0) {
3135  SDL_Event event;
3136 
3137  event.type = FF_QUIT_EVENT;
3138  event.user.data1 = is;
3139  SDL_PushEvent(&event);
3140  }
3141  SDL_DestroyMutex(wait_mutex);
3142  return 0;
3143 }
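The play-range check near the bottom of the read loop is pure timestamp arithmetic: the packet timestamp (pts, or dts when pts is missing) is shifted by the stream start time, converted to seconds with the stream time base, and compared against the -t duration after subtracting the -ss offset (both held in microseconds); when no -t was given the test is skipped entirely. A standalone sketch of that test (plain C; all values are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative stream/packet values */
        int64_t pkt_ts            = 900000;         /* in stream time_base units */
        int64_t stream_start_time = 0;
        double  time_base         = 1.0 / 90000.0;  /* e.g. MPEG-TS 90 kHz */
        int64_t start_time        = 2 * 1000000LL;  /* -ss 2, in microseconds */
        int64_t duration          = 10 * 1000000LL; /* -t 10, in microseconds */

        double pkt_time = (pkt_ts - stream_start_time) * time_base
                          - (double)start_time / 1000000.0;
        int in_play_range = pkt_time <= (double)duration / 1000000.0;

        printf("packet at %.3f s relative to -ss: %s\n",
               pkt_time, in_play_range ? "queued" : "discarded");
        return 0;
    }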
3144 
3145 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3146 {
3147  VideoState *is;
3148 
3149  is = av_mallocz(sizeof(VideoState));
3150  if (!is)
3151  return NULL;
3152  is->filename = av_strdup(filename);
3153  if (!is->filename)
3154  goto fail;
3155  is->iformat = iformat;
3156  is->ytop = 0;
3157  is->xleft = 0;
3158 
3159  /* start video display */
3160  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3161  goto fail;
3162  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3163  goto fail;
3164  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3165  goto fail;
3166 
3167  if (packet_queue_init(&is->videoq) < 0 ||
3168  packet_queue_init(&is->audioq) < 0 ||
3169  packet_queue_init(&is->subtitleq) < 0)
3170  goto fail;
3171 
3172  if (!(is->continue_read_thread = SDL_CreateCond())) {
3173  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3174  goto fail;
3175  }
3176 
3177  init_clock(&is->vidclk, &is->videoq.serial);
3178  init_clock(&is->audclk, &is->audioq.serial);
3179  init_clock(&is->extclk, &is->extclk.serial);
3180  is->audio_clock_serial = -1;
3181  is->audio_volume = SDL_MIX_MAXVOLUME;
3182  is->muted = 0;
3183  is->av_sync_type = av_sync_type;
3184  is->read_tid = SDL_CreateThread(read_thread, is);
3185  if (!is->read_tid) {
3186  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3187 fail:
3188  stream_close(is);
3189  return NULL;
3190  }
3191  return is;
3192 }
3193 
3194 static void stream_cycle_channel(VideoState *is, int codec_type)
3195 {
3196  AVFormatContext *ic = is->ic;
3197  int start_index, stream_index;
3198  int old_index;
3199  AVStream *st;
3200  AVProgram *p = NULL;
3201  int nb_streams = is->ic->nb_streams;
3202 
3203  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3204  start_index = is->last_video_stream;
3205  old_index = is->video_stream;
3206  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3207  start_index = is->last_audio_stream;
3208  old_index = is->audio_stream;
3209  } else {
3210  start_index = is->last_subtitle_stream;
3211  old_index = is->subtitle_stream;
3212  }
3213  stream_index = start_index;
3214 
3215  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3216  p = av_find_program_from_stream(is->ic, NULL, is->video_stream);
3217  if (p) {
3218  nb_streams = p->nb_stream_indexes;
3219  for (start_index = 0; start_index < nb_streams; start_index++)
3220  if (p->stream_index[start_index] == stream_index)
3221  break;
3222  if (start_index == nb_streams)
3223  start_index = -1;
3224  stream_index = start_index;
3225  }
3226  }
3227 
3228  for (;;) {
3229  if (++stream_index >= nb_streams)
3230  {
3231  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3232  {
3233  stream_index = -1;
3234  is->last_subtitle_stream = -1;
3235  goto the_end;
3236  }
3237  if (start_index == -1)
3238  return;
3239  stream_index = 0;
3240  }
3241  if (stream_index == start_index)
3242  return;
3243  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3244  if (st->codec->codec_type == codec_type) {
3245  /* check that parameters are OK */
3246  switch (codec_type) {
3247  case AVMEDIA_TYPE_AUDIO:
3248  if (st->codec->sample_rate != 0 &&
3249  st->codec->channels != 0)
3250  goto the_end;
3251  break;
3252  case AVMEDIA_TYPE_VIDEO:
3253  case AVMEDIA_TYPE_SUBTITLE:
3254  goto the_end;
3255  default:
3256  break;
3257  }
3258  }
3259  }
3260  the_end:
3261  if (p && stream_index != -1)
3262  stream_index = p->stream_index[stream_index];
3263  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3264  av_get_media_type_string(codec_type),
3265  old_index,
3266  stream_index);
3267 
3268  stream_component_close(is, old_index);
3269  stream_component_open(is, stream_index);
3270 }
3271 
3272 
3273 static void toggle_full_screen(VideoState *is)
3274 {
3275 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3276  /* OS X needs to reallocate the SDL overlays */
3277  int i;
3278  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3279  is->pictq.queue[i].reallocate = 1;
3280 #endif
3281  is_full_screen = !is_full_screen;
3282  video_open(is, 1, NULL);
3283 }
3284 
3285 static void toggle_audio_display(VideoState *is)
3286 {
3287  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3288  int next = is->show_mode;
3289  do {
3290  next = (next + 1) % SHOW_MODE_NB;
3291  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3292  if (is->show_mode != next) {
3293  fill_rectangle(screen,
3294  is->xleft, is->ytop, is->width, is->height,
3295  bgcolor, 1);
3296  is->force_refresh = 1;
3297  is->show_mode = next;
3298  }
3299 }
3300 
3301 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3302  double remaining_time = 0.0;
3303  SDL_PumpEvents();
3304  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3305  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3306  SDL_ShowCursor(0);
3307  cursor_hidden = 1;
3308  }
3309  if (remaining_time > 0.0)
3310  av_usleep((int64_t)(remaining_time * 1000000.0));
3311  remaining_time = REFRESH_RATE;
3312  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3313  video_refresh(is, &remaining_time);
3314  SDL_PumpEvents();
3315  }
3316 }
3317 
3318 static void seek_chapter(VideoState *is, int incr)
3319 {
3320  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3321  int i;
3322 
3323  if (!is->ic->nb_chapters)
3324  return;
3325 
3326  /* find the current chapter */
3327  for (i = 0; i < is->ic->nb_chapters; i++) {
3328  AVChapter *ch = is->ic->chapters[i];
3329  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3330  i--;
3331  break;
3332  }
3333  }
3334 
3335  i += incr;
3336  i = FFMAX(i, 0);
3337  if (i >= is->ic->nb_chapters)
3338  return;
3339 
3340  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3341  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3342  AV_TIME_BASE_Q), 0, 0);
3343 }
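Finding the current chapter relies on comparing the master-clock position (rescaled to AV_TIME_BASE) against each chapter's start time, which is expressed in that chapter's own time base; av_compare_ts() does that comparison with integer arithmetic only. A sketch of the same idea using a simplified rescale (plain C; the time bases and chapter layout are illustrative, and to_micros() is a stand-in for av_rescale_q() without overflow handling):

    #include <stdint.h>
    #include <stdio.h>

    /* rescale a timestamp in (num/den) seconds-per-tick to microseconds */
    static int64_t to_micros(int64_t ts, int num, int den)
    {
        return ts * 1000000LL * num / den;
    }

    int main(void)
    {
        /* chapters start at 0 s, 60 s, 120 s, expressed in a 1/1000 time base */
        int64_t chapter_start[] = { 0, 60000, 120000 };
        int num = 1, den = 1000;
        int64_t pos = (int64_t)(75.0 * 1000000);   /* master clock at 75 s */

        int cur = 0;
        for (int i = 0; i < 3; i++) {
            if (pos < to_micros(chapter_start[i], num, den)) {
                cur = i - 1;   /* same back-up step as the loop above */
                break;
            }
            cur = i;
        }
        printf("currently in chapter %d\n", cur);
        return 0;
    }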
3344 
3345 /* handle an event sent by the GUI */
3346 static void event_loop(VideoState *cur_stream)
3347 {
3348  SDL_Event event;
3349  double incr, pos, frac;
3350 
3351  for (;;) {
3352  double x;
3353  refresh_loop_wait_event(cur_stream, &event);
3354  switch (event.type) {
3355  case SDL_KEYDOWN:
3356  if (exit_on_keydown) {
3357  do_exit(cur_stream);
3358  break;
3359  }
3360  switch (event.key.keysym.sym) {
3361  case SDLK_ESCAPE:
3362  case SDLK_q:
3363  do_exit(cur_stream);
3364  break;
3365  case SDLK_f:
3366  toggle_full_screen(cur_stream);
3367  cur_stream->force_refresh = 1;
3368  break;
3369  case SDLK_p:
3370  case SDLK_SPACE:
3371  toggle_pause(cur_stream);
3372  break;
3373  case SDLK_m:
3374  toggle_mute(cur_stream);
3375  break;
3376  case SDLK_KP_MULTIPLY:
3377  case SDLK_0:
3378  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3379  break;
3380  case SDLK_KP_DIVIDE:
3381  case SDLK_9:
3382  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3383  break;
3384  case SDLK_s: // S: Step to next frame
3385  step_to_next_frame(cur_stream);
3386  break;
3387  case SDLK_a:
3388  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3389  break;
3390  case SDLK_v:
3391  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3392  break;
3393  case SDLK_c:
3394  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3395  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3396  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3397  break;
3398  case SDLK_t:
3399  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3400  break;
3401  case SDLK_w:
3402 #if CONFIG_AVFILTER
3403  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3404  if (++cur_stream->vfilter_idx >= nb_vfilters)
3405  cur_stream->vfilter_idx = 0;
3406  } else {
3407  cur_stream->vfilter_idx = 0;
3408  toggle_audio_display(cur_stream);
3409  }
3410 #else
3411  toggle_audio_display(cur_stream);
3412 #endif
3413  break;
3414  case SDLK_PAGEUP:
3415  if (cur_stream->ic->nb_chapters <= 1) {
3416  incr = 600.0;
3417  goto do_seek;
3418  }
3419  seek_chapter(cur_stream, 1);
3420  break;
3421  case SDLK_PAGEDOWN:
3422  if (cur_stream->ic->nb_chapters <= 1) {
3423  incr = -600.0;
3424  goto do_seek;
3425  }
3426  seek_chapter(cur_stream, -1);
3427  break;
3428  case SDLK_LEFT:
3429  incr = -10.0;
3430  goto do_seek;
3431  case SDLK_RIGHT:
3432  incr = 10.0;
3433  goto do_seek;
3434  case SDLK_UP:
3435  incr = 60.0;
3436  goto do_seek;
3437  case SDLK_DOWN:
3438  incr = -60.0;
3439  do_seek:
3440  if (seek_by_bytes) {
3441  pos = -1;
3442  if (pos < 0 && cur_stream->video_stream >= 0)
3443  pos = frame_queue_last_pos(&cur_stream->pictq);
3444  if (pos < 0 && cur_stream->audio_stream >= 0)
3445  pos = frame_queue_last_pos(&cur_stream->sampq);
3446  if (pos < 0)
3447  pos = avio_tell(cur_stream->ic->pb);
3448  if (cur_stream->ic->bit_rate)
3449  incr *= cur_stream->ic->bit_rate / 8.0;
3450  else
3451  incr *= 180000.0;
3452  pos += incr;
3453  stream_seek(cur_stream, pos, incr, 1);
3454  } else {
3455  pos = get_master_clock(cur_stream);
3456  if (isnan(pos))
3457  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3458  pos += incr;
3459  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3460  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3461  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3462  }
3463  break;
3464  default:
3465  break;
3466  }
3467  break;
3468  case SDL_VIDEOEXPOSE:
3469  cur_stream->force_refresh = 1;
3470  break;
3471  case SDL_MOUSEBUTTONDOWN:
3472  if (exit_on_mousedown) {
3473  do_exit(cur_stream);
3474  break;
3475  }
3476  if (event.button.button == SDL_BUTTON_LEFT) {
3477  static int64_t last_mouse_left_click = 0;
3478  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3479  toggle_full_screen(cur_stream);
3480  cur_stream->force_refresh = 1;
3481  last_mouse_left_click = 0;
3482  } else {
3483  last_mouse_left_click = av_gettime_relative();
3484  }
3485  }
3486  case SDL_MOUSEMOTION:
3487  if (cursor_hidden) {
3488  SDL_ShowCursor(1);
3489  cursor_hidden = 0;
3490  }
3491  cursor_last_shown = av_gettime_relative();
3492  if (event.type == SDL_MOUSEBUTTONDOWN) {
3493  if (event.button.button != SDL_BUTTON_RIGHT)
3494  break;
3495  x = event.button.x;
3496  } else {
3497  if (!(event.motion.state & SDL_BUTTON_RMASK))
3498  break;
3499  x = event.motion.x;
3500  }
3501  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3502  uint64_t size = avio_size(cur_stream->ic->pb);
3503  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3504  } else {
3505  int64_t ts;
3506  int ns, hh, mm, ss;
3507  int tns, thh, tmm, tss;
3508  tns = cur_stream->ic->duration / 1000000LL;
3509  thh = tns / 3600;
3510  tmm = (tns % 3600) / 60;
3511  tss = (tns % 60);
3512  frac = x / cur_stream->width;
3513  ns = frac * tns;
3514  hh = ns / 3600;
3515  mm = (ns % 3600) / 60;
3516  ss = (ns % 60);
3518  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3519  hh, mm, ss, thh, tmm, tss);
3520  ts = frac * cur_stream->ic->duration;
3521  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3522  ts += cur_stream->ic->start_time;
3523  stream_seek(cur_stream, ts, 0, 0);
3524  }
3525  break;
3526  case SDL_VIDEORESIZE:
3527  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3528  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3529  if (!screen) {
3530  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3531  do_exit(cur_stream);
3532  }
3533  screen_width = cur_stream->width = screen->w;
3534  screen_height = cur_stream->height = screen->h;
3535  cur_stream->force_refresh = 1;
3536  break;
3537  case SDL_QUIT:
3538  case FF_QUIT_EVENT:
3539  do_exit(cur_stream);
3540  break;
3541  case FF_ALLOC_EVENT:
3542  alloc_picture(event.user.data1);
3543  break;
3544  default:
3545  break;
3546  }
3547  }
3548 }
3549 
3550 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3551 {
3552  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3553  return opt_default(NULL, "video_size", arg);
3554 }
3555 
3556 static int opt_width(void *optctx, const char *opt, const char *arg)
3557 {
3558  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3559  return 0;
3560 }
3561 
3562 static int opt_height(void *optctx, const char *opt, const char *arg)
3563 {
3564  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3565  return 0;
3566 }
3567 
3568 static int opt_format(void *optctx, const char *opt, const char *arg)
3569 {
3570  file_iformat = av_find_input_format(arg);
3571  if (!file_iformat) {
3572  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3573  return AVERROR(EINVAL);
3574  }
3575  return 0;
3576 }
3577 
3578 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3579 {
3580  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3581  return opt_default(NULL, "pixel_format", arg);
3582 }
3583 
3584 static int opt_sync(void *optctx, const char *opt, const char *arg)
3585 {
3586  if (!strcmp(arg, "audio"))
3588  else if (!strcmp(arg, "video"))
3590  else if (!strcmp(arg, "ext"))
3592  else {
3593  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3594  exit(1);
3595  }
3596  return 0;
3597 }
3598 
3599 static int opt_seek(void *optctx, const char *opt, const char *arg)
3600 {
3601  start_time = parse_time_or_die(opt, arg, 1);
3602  return 0;
3603 }
3604 
3605 static int opt_duration(void *optctx, const char *opt, const char *arg)
3606 {
3607  duration = parse_time_or_die(opt, arg, 1);
3608  return 0;
3609 }
3610 
3611 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3612 {
3613  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3614  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3615  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3616  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3617  return 0;
3618 }
3619 
3620 static void opt_input_file(void *optctx, const char *filename)
3621 {
3622  if (input_filename) {
3624  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3625  filename, input_filename);
3626  exit(1);
3627  }
3628  if (!strcmp(filename, "-"))
3629  filename = "pipe:";
3630  input_filename = filename;
3631 }
3632 
3633 static int opt_codec(void *optctx, const char *opt, const char *arg)
3634 {
3635  const char *spec = strchr(opt, ':');
3636  if (!spec) {
3638  "No media specifier was specified in '%s' in option '%s'\n",
3639  arg, opt);
3640  return AVERROR(EINVAL);
3641  }
3642  spec++;
3643  switch (spec[0]) {
3644  case 'a' : audio_codec_name = arg; break;
3645  case 's' : subtitle_codec_name = arg; break;
3646  case 'v' : video_codec_name = arg; break;
3647  default:
3649  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3650  return AVERROR(EINVAL);
3651  }
3652  return 0;
3653 }
3654 
3655 static int dummy;
3656 
3657 static const OptionDef options[] = {
3658 #include "cmdutils_common_opts.h"
3659  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3660  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3661  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3662  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3663  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3664  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3665  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3666  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3667  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3668  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3669  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3670  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3671  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3672  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3673  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3674  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3675  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3676  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3677  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3678  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3679  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3680  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3681  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3682  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3683  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3684  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3685  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3686  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3687  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3688 #if CONFIG_AVFILTER
3689  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3690  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3691 #endif
3692  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3693  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3694  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3695  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3696  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3697  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3698  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3699  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3700  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3701  { NULL, },
3702 };
3703 
3704 static void show_usage(void)
3705 {
3706  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3707  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3708  av_log(NULL, AV_LOG_INFO, "\n");
3709 }
3710 
3711 void show_help_default(const char *opt, const char *arg)
3712 {
3714  show_usage();
3715  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3716  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3717  printf("\n");
3720 #if !CONFIG_AVFILTER
3722 #else
3724 #endif
3725  printf("\nWhile playing:\n"
3726  "q, ESC quit\n"
3727  "f toggle full screen\n"
3728  "p, SPC pause\n"
3729  "m toggle mute\n"
3730  "9, 0 decrease and increase volume respectively\n"
3731  "/, * decrease and increase volume respectively\n"
3732  "a cycle audio channel in the current program\n"
3733  "v cycle video channel\n"
3734  "t cycle subtitle channel in the current program\n"
3735  "c cycle program\n"
3736  "w cycle video filters or show modes\n"
3737  "s activate frame-step mode\n"
3738  "left/right seek backward/forward 10 seconds\n"
3739  "down/up seek backward/forward 1 minute\n"
3740  "page down/page up seek backward/forward 10 minutes\n"
3741  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3742  "left double-click toggle full screen\n"
3743  );
3744 }
3745 
3746 static int lockmgr(void **mtx, enum AVLockOp op)
3747 {
3748  switch(op) {
3749  case AV_LOCK_CREATE:
3750  *mtx = SDL_CreateMutex();
3751  if(!*mtx) {
3752  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3753  return 1;
3754  }
3755  return 0;
3756  case AV_LOCK_OBTAIN:
3757  return !!SDL_LockMutex(*mtx);
3758  case AV_LOCK_RELEASE:
3759  return !!SDL_UnlockMutex(*mtx);
3760  case AV_LOCK_DESTROY:
3761  SDL_DestroyMutex(*mtx);
3762  return 0;
3763  }
3764  return 1;
3765 }
3766 
3767 /* Called from the main */
3768 int main(int argc, char **argv)
3769 {
3770  int flags;
3771  VideoState *is;
3772  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3773 
3774  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3775  parse_loglevel(argc, argv, options);
3776 
3777  /* register all codecs, demuxers and protocols */
3778 #if CONFIG_AVDEVICE
3779  avdevice_register_all();
3780 #endif
3781 #if CONFIG_AVFILTER
3782  avfilter_register_all();
3783 #endif
3784  av_register_all();
3785  avformat_network_init();
3786 
3787  init_opts();
3788 
3789  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3790  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3791 
3792  show_banner(argc, argv, options);
3793 
3794  parse_options(NULL, argc, argv, options, opt_input_file);
3795 
3796  if (!input_filename) {
3797  show_usage();
3798  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3800  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3801  exit(1);
3802  }
3803 
3804  if (display_disable) {
3805  video_disable = 1;
3806  }
3807  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3808  if (audio_disable)
3809  flags &= ~SDL_INIT_AUDIO;
3810  if (display_disable)
3811  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3812 #if !defined(_WIN32) && !defined(__APPLE__)
3813  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3814 #endif
3815  if (SDL_Init (flags)) {
3816  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3817  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3818  exit(1);
3819  }
3820 
3821  if (!display_disable) {
3822  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3823  fs_screen_width = vi->current_w;
3824  fs_screen_height = vi->current_h;
3825  }
3826 
3827  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3828  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3829  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3830 
3831  SDL_EnableKeyRepeat(SDL_DEFAULT_REPEAT_DELAY, SDL_DEFAULT_REPEAT_INTERVAL);
3832 
3834  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3835  do_exit(NULL);
3836  }
3837 
3838  av_init_packet(&flush_pkt);
3839  flush_pkt.data = (uint8_t *)&flush_pkt;
3840 
3841  is = stream_open(input_filename, file_iformat);
3842  if (!is) {
3843  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3844  do_exit(NULL);
3845  }
3846 
3847  event_loop(is);
3848 
3849  /* never returns */
3850 
3851  return 0;
3852 }