ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
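/* For reference, the SDL audio buffer actually requested when the device is
 * opened (in audio_open(), further down in this file) is roughly
 * FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC)).
 * Example: freq = 48000 Hz -> 48000 / 30 = 1600 -> 2 << av_log2(1600) = 2048
 * samples per callback, i.e. about 43 ms of audio. */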
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
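/* A Clock does not tick on its own: set_clock_at() stores pts and the wall-clock
 * time of the update, and get_clock() reconstructs the current value from
 * pts_drift plus the elapsed wall-clock time scaled by 'speed'. A mismatch
 * between 'serial' and *queue_serial marks the clock value as obsolete
 * (e.g. right after a seek) and get_clock() then returns NAN. */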
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
205  AVInputFormat *iformat;
206  int abort_request;
207  int force_refresh;
208  int paused;
209  int last_paused;
210  int queue_attachments_req;
211  int seek_req;
212  int seek_flags;
213  int64_t seek_pos;
214  int64_t seek_rel;
215  int read_pause_return;
216  AVFormatContext *ic;
217  int realtime;
218 
219  Clock audclk;
220  Clock vidclk;
221  Clock extclk;
222 
223  FrameQueue pictq;
224  FrameQueue subpq;
225  FrameQueue sampq;
226 
227  Decoder auddec;
228  Decoder viddec;
229  Decoder subdec;
230 
231  int audio_stream;
232 
233  int av_sync_type;
234 
235  double audio_clock;
236  int audio_clock_serial;
237  double audio_diff_cum; /* used for AV difference average computation */
238  double audio_diff_avg_coef;
239  double audio_diff_threshold;
240  int audio_diff_avg_count;
241  AVStream *audio_st;
242  PacketQueue audioq;
243  int audio_hw_buf_size;
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
249  int audio_write_buf_size;
250  int audio_volume;
251  int muted;
252  struct AudioParams audio_src;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
256  struct AudioParams audio_tgt;
257  struct SwrContext *swr_ctx;
258  int frame_drops_early;
259  int frame_drops_late;
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
264  int16_t sample_array[SAMPLE_ARRAY_SIZE];
265  int sample_array_index;
266  int last_i_start;
267  RDFTContext *rdft;
268  int rdft_bits;
269  FFTSample *rdft_data;
270  int xpos;
271  double last_vis_time;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
276  int subtitle_stream;
277  AVStream *subtitle_st;
278  PacketQueue subtitleq;
279 
280  double frame_timer;
281  double frame_last_returned_time;
282  double frame_last_filter_delay;
283  int video_stream;
284  AVStream *video_st;
285  PacketQueue videoq;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287  struct SwsContext *img_convert_ctx;
288  struct SwsContext *sub_convert_ctx;
289  int eof;
290 
291  char *filename;
292  int width, height, xleft, ytop;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
304  int last_video_stream, last_audio_stream, last_subtitle_stream;
305 
306  SDL_cond *continue_read_thread;
307 } VideoState;
308 
309 /* options specified by the user */
310 static AVInputFormat *file_iformat;
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static int display_disable;
323 static int borderless;
324 static int startup_volume = 100;
325 static int show_status = 1;
326 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
327 static int64_t start_time = AV_NOPTS_VALUE;
328 static int64_t duration = AV_NOPTS_VALUE;
329 static int fast = 0;
330 static int genpts = 0;
331 static int lowres = 0;
332 static int decoder_reorder_pts = -1;
333 static int autoexit;
334 static int exit_on_keydown;
335 static int exit_on_mousedown;
336 static int loop = 1;
337 static int framedrop = -1;
338 static int infinite_buffer = -1;
339 static enum ShowMode show_mode = SHOW_MODE_NONE;
340 static const char *audio_codec_name;
341 static const char *subtitle_codec_name;
342 static const char *video_codec_name;
343 double rdftspeed = 0.02;
344 static int64_t cursor_last_shown;
345 static int cursor_hidden = 0;
346 #if CONFIG_AVFILTER
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 #endif
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
358 static AVPacket flush_pkt;
359 
360 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
361 
362 static SDL_Window *window;
363 static SDL_Renderer *renderer;
364 static SDL_RendererInfo renderer_info = {0};
365 static SDL_AudioDeviceID audio_dev;
366 
367 static const struct TextureFormatEntry {
368  enum AVPixelFormat format;
369  int texture_fmt;
370 } sdl_texture_format_map[] = {
371  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
372  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
373  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
374  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
375  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
376  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
377  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
378  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
379  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
380  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
381  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
382  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
383  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
384  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
385  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
386  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
387  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
388  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
389  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
390  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
391 };
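/* This table is scanned linearly by get_sdl_pix_fmt_and_blendmode() below;
 * the AV_PIX_FMT_NONE / SDL_PIXELFORMAT_UNKNOWN pair acts as the sentinel.
 * Frames in any format not listed here are converted to BGRA with swscale
 * in upload_texture() before being uploaded. */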
392 
393 #if CONFIG_AVFILTER
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396  GROW_ARRAY(vfilters_list, nb_vfilters);
397  vfilters_list[nb_vfilters - 1] = arg;
398  return 0;
399 }
400 #endif
401 
402 static inline
403 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
404  enum AVSampleFormat fmt2, int64_t channel_count2)
405 {
406  /* If channel count == 1, planar and non-planar formats are the same */
407  if (channel_count1 == 1 && channel_count2 == 1)
408  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
409  else
410  return channel_count1 != channel_count2 || fmt1 != fmt2;
411 }
412 
413 static inline
414 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
415 {
416  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
417  return channel_layout;
418  else
419  return 0;
420 }
421 
422 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
423 {
424  MyAVPacketList *pkt1;
425 
426  if (q->abort_request)
427  return -1;
428 
429  pkt1 = av_malloc(sizeof(MyAVPacketList));
430  if (!pkt1)
431  return -1;
432  pkt1->pkt = *pkt;
433  pkt1->next = NULL;
434  if (pkt == &flush_pkt)
435  q->serial++;
436  pkt1->serial = q->serial;
437 
438  if (!q->last_pkt)
439  q->first_pkt = pkt1;
440  else
441  q->last_pkt->next = pkt1;
442  q->last_pkt = pkt1;
443  q->nb_packets++;
444  q->size += pkt1->pkt.size + sizeof(*pkt1);
445  q->duration += pkt1->pkt.duration;
446  /* XXX: should duplicate packet data in DV case */
447  SDL_CondSignal(q->cond);
448  return 0;
449 }
450 
451 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
452 {
453  int ret;
454 
455  SDL_LockMutex(q->mutex);
456  ret = packet_queue_put_private(q, pkt);
457  SDL_UnlockMutex(q->mutex);
458 
459  if (pkt != &flush_pkt && ret < 0)
460  av_packet_unref(pkt);
461 
462  return ret;
463 }
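/* Serial bookkeeping: pushing the special flush_pkt bumps q->serial, and every
 * packet remembers the serial that was current when it was queued. Consumers
 * compare that serial against the queue's current one (see packet_queue_get()
 * and decoder_decode_frame()) and simply discard packets queued before the last
 * flush, which is how seeking invalidates stale data without draining threads. */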
464 
465 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
466 {
467  AVPacket pkt1, *pkt = &pkt1;
468  av_init_packet(pkt);
469  pkt->data = NULL;
470  pkt->size = 0;
471  pkt->stream_index = stream_index;
472  return packet_queue_put(q, pkt);
473 }
474 
475 /* packet queue handling */
476 static int packet_queue_init(PacketQueue *q)
477 {
478  memset(q, 0, sizeof(PacketQueue));
479  q->mutex = SDL_CreateMutex();
480  if (!q->mutex) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->cond = SDL_CreateCond();
485  if (!q->cond) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->abort_request = 1;
490  return 0;
491 }
492 
493 static void packet_queue_flush(PacketQueue *q)
494 {
495  MyAVPacketList *pkt, *pkt1;
496 
497  SDL_LockMutex(q->mutex);
498  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
499  pkt1 = pkt->next;
500  av_packet_unref(&pkt->pkt);
501  av_freep(&pkt);
502  }
503  q->last_pkt = NULL;
504  q->first_pkt = NULL;
505  q->nb_packets = 0;
506  q->size = 0;
507  q->duration = 0;
508  SDL_UnlockMutex(q->mutex);
509 }
510 
511 static void packet_queue_destroy(PacketQueue *q)
512 {
513  packet_queue_flush(q);
514  SDL_DestroyMutex(q->mutex);
515  SDL_DestroyCond(q->cond);
516 }
517 
518 static void packet_queue_abort(PacketQueue *q)
519 {
520  SDL_LockMutex(q->mutex);
521 
522  q->abort_request = 1;
523 
524  SDL_CondSignal(q->cond);
525 
526  SDL_UnlockMutex(q->mutex);
527 }
528 
529 static void packet_queue_start(PacketQueue *q)
530 {
531  SDL_LockMutex(q->mutex);
532  q->abort_request = 0;
533  packet_queue_put_private(q, &flush_pkt);
534  SDL_UnlockMutex(q->mutex);
535 }
536 
537 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
538 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
539 {
540  MyAVPacketList *pkt1;
541  int ret;
542 
543  SDL_LockMutex(q->mutex);
544 
545  for (;;) {
546  if (q->abort_request) {
547  ret = -1;
548  break;
549  }
550 
551  pkt1 = q->first_pkt;
552  if (pkt1) {
553  q->first_pkt = pkt1->next;
554  if (!q->first_pkt)
555  q->last_pkt = NULL;
556  q->nb_packets--;
557  q->size -= pkt1->pkt.size + sizeof(*pkt1);
558  q->duration -= pkt1->pkt.duration;
559  *pkt = pkt1->pkt;
560  if (serial)
561  *serial = pkt1->serial;
562  av_free(pkt1);
563  ret = 1;
564  break;
565  } else if (!block) {
566  ret = 0;
567  break;
568  } else {
569  SDL_CondWait(q->cond, q->mutex);
570  }
571  }
572  SDL_UnlockMutex(q->mutex);
573  return ret;
574 }
575 
576 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
577  memset(d, 0, sizeof(Decoder));
578  d->avctx = avctx;
579  d->queue = queue;
580  d->empty_queue_cond = empty_queue_cond;
581  d->start_pts = AV_NOPTS_VALUE;
582  d->pkt_serial = -1;
583 }
584 
585 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
586  int ret = AVERROR(EAGAIN);
587 
588  for (;;) {
589  AVPacket pkt;
590 
591  if (d->queue->serial == d->pkt_serial) {
592  do {
593  if (d->queue->abort_request)
594  return -1;
595 
596  switch (d->avctx->codec_type) {
597  case AVMEDIA_TYPE_VIDEO:
598  ret = avcodec_receive_frame(d->avctx, frame);
599  if (ret >= 0) {
600  if (decoder_reorder_pts == -1) {
601  frame->pts = frame->best_effort_timestamp;
602  } else if (!decoder_reorder_pts) {
603  frame->pts = frame->pkt_dts;
604  }
605  }
606  break;
607  case AVMEDIA_TYPE_AUDIO:
608  ret = avcodec_receive_frame(d->avctx, frame);
609  if (ret >= 0) {
610  AVRational tb = (AVRational){1, frame->sample_rate};
611  if (frame->pts != AV_NOPTS_VALUE)
612  frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
613  else if (d->next_pts != AV_NOPTS_VALUE)
614  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
615  if (frame->pts != AV_NOPTS_VALUE) {
616  d->next_pts = frame->pts + frame->nb_samples;
617  d->next_pts_tb = tb;
618  }
619  }
620  break;
621  }
622  if (ret == AVERROR_EOF) {
623  d->finished = d->pkt_serial;
624  avcodec_flush_buffers(d->avctx);
625  return 0;
626  }
627  if (ret >= 0)
628  return 1;
629  } while (ret != AVERROR(EAGAIN));
630  }
631 
632  do {
633  if (d->queue->nb_packets == 0)
634  SDL_CondSignal(d->empty_queue_cond);
635  if (d->packet_pending) {
636  av_packet_move_ref(&pkt, &d->pkt);
637  d->packet_pending = 0;
638  } else {
639  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
640  return -1;
641  }
642  } while (d->queue->serial != d->pkt_serial);
643 
644  if (pkt.data == flush_pkt.data) {
645  avcodec_flush_buffers(d->avctx);
646  d->finished = 0;
647  d->next_pts = d->start_pts;
648  d->next_pts_tb = d->start_pts_tb;
649  } else {
650  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
651  int got_frame = 0;
652  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
653  if (ret < 0) {
654  ret = AVERROR(EAGAIN);
655  } else {
656  if (got_frame && !pkt.data) {
657  d->packet_pending = 1;
658  av_packet_move_ref(&d->pkt, &pkt);
659  }
660  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
661  }
662  } else {
663  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
664  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
665  d->packet_pending = 1;
666  av_packet_move_ref(&d->pkt, &pkt);
667  }
668  }
669  av_packet_unref(&pkt);
670  }
671  }
672 }
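/* decoder_decode_frame() above follows the send/receive decoding model:
 * keep calling avcodec_receive_frame() until it returns AVERROR(EAGAIN),
 * then fetch the next packet with a matching serial and feed it with
 * avcodec_send_packet(). Subtitles still go through the legacy
 * avcodec_decode_subtitle2() path. A flush packet resets the codec and the
 * pts prediction state instead of being decoded. */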
673 
674 static void decoder_destroy(Decoder *d) {
675  av_packet_unref(&d->pkt);
676  avcodec_free_context(&d->avctx);
677 }
678 
679 static void frame_queue_unref_item(Frame *vp)
680 {
681  av_frame_unref(vp->frame);
682  avsubtitle_free(&vp->sub);
683 }
684 
685 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
686 {
687  int i;
688  memset(f, 0, sizeof(FrameQueue));
689  if (!(f->mutex = SDL_CreateMutex())) {
690  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
691  return AVERROR(ENOMEM);
692  }
693  if (!(f->cond = SDL_CreateCond())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  f->pktq = pktq;
698  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
699  f->keep_last = !!keep_last;
700  for (i = 0; i < f->max_size; i++)
701  if (!(f->queue[i].frame = av_frame_alloc()))
702  return AVERROR(ENOMEM);
703  return 0;
704 }
705 
706 static void frame_queue_destory(FrameQueue *f)
707 {
708  int i;
709  for (i = 0; i < f->max_size; i++) {
710  Frame *vp = &f->queue[i];
711  frame_queue_unref_item(vp);
712  av_frame_free(&vp->frame);
713  }
714  SDL_DestroyMutex(f->mutex);
715  SDL_DestroyCond(f->cond);
716 }
717 
718 static void frame_queue_signal(FrameQueue *f)
719 {
720  SDL_LockMutex(f->mutex);
721  SDL_CondSignal(f->cond);
722  SDL_UnlockMutex(f->mutex);
723 }
724 
725 static Frame *frame_queue_peek(FrameQueue *f)
726 {
727  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
728 }
729 
730 static Frame *frame_queue_peek_next(FrameQueue *f)
731 {
732  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
733 }
734 
735 static Frame *frame_queue_peek_last(FrameQueue *f)
736 {
737  return &f->queue[f->rindex];
738 }
739 
740 static Frame *frame_queue_peek_writable(FrameQueue *f)
741 {
742  /* wait until we have space to put a new frame */
743  SDL_LockMutex(f->mutex);
744  while (f->size >= f->max_size &&
745  !f->pktq->abort_request) {
746  SDL_CondWait(f->cond, f->mutex);
747  }
748  SDL_UnlockMutex(f->mutex);
749 
750  if (f->pktq->abort_request)
751  return NULL;
752 
753  return &f->queue[f->windex];
754 }
755 
756 static Frame *frame_queue_peek_readable(FrameQueue *f)
757 {
758  /* wait until we have a readable new frame */
759  SDL_LockMutex(f->mutex);
760  while (f->size - f->rindex_shown <= 0 &&
761  !f->pktq->abort_request) {
762  SDL_CondWait(f->cond, f->mutex);
763  }
764  SDL_UnlockMutex(f->mutex);
765 
766  if (f->pktq->abort_request)
767  return NULL;
768 
769  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
770 }
771 
772 static void frame_queue_push(FrameQueue *f)
773 {
774  if (++f->windex == f->max_size)
775  f->windex = 0;
776  SDL_LockMutex(f->mutex);
777  f->size++;
778  SDL_CondSignal(f->cond);
779  SDL_UnlockMutex(f->mutex);
780 }
781 
782 static void frame_queue_next(FrameQueue *f)
783 {
784  if (f->keep_last && !f->rindex_shown) {
785  f->rindex_shown = 1;
786  return;
787  }
788  frame_queue_unref_item(&f->queue[f->rindex]);
789  if (++f->rindex == f->max_size)
790  f->rindex = 0;
791  SDL_LockMutex(f->mutex);
792  f->size--;
793  SDL_CondSignal(f->cond);
794  SDL_UnlockMutex(f->mutex);
795 }
796 
797 /* return the number of undisplayed frames in the queue */
798 static int frame_queue_nb_remaining(FrameQueue *f)
799 {
800  return f->size - f->rindex_shown;
801 }
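/* keep_last / rindex_shown: when keep_last is set, the frame at rindex is the
 * one currently on screen and is kept in the queue so it can be re-rendered
 * (window exposure, forced refresh) without decoding anything. rindex_shown
 * records that this slot has already been displayed, which is why the peek
 * helpers index with (rindex + rindex_shown) and nb_remaining subtracts it. */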
802 
803 /* return last shown position */
804 static int64_t frame_queue_last_pos(FrameQueue *f)
805 {
806  Frame *fp = &f->queue[f->rindex];
807  if (f->rindex_shown && fp->serial == f->pktq->serial)
808  return fp->pos;
809  else
810  return -1;
811 }
812 
813 static void decoder_abort(Decoder *d, FrameQueue *fq)
814 {
815  packet_queue_abort(d->queue);
816  frame_queue_signal(fq);
817  SDL_WaitThread(d->decoder_tid, NULL);
818  d->decoder_tid = NULL;
819  packet_queue_flush(d->queue);
820 }
821 
822 static inline void fill_rectangle(int x, int y, int w, int h)
823 {
824  SDL_Rect rect;
825  rect.x = x;
826  rect.y = y;
827  rect.w = w;
828  rect.h = h;
829  if (w && h)
830  SDL_RenderFillRect(renderer, &rect);
831 }
832 
833 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
834 {
835  Uint32 format;
836  int access, w, h;
837  if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
838  void *pixels;
839  int pitch;
840  SDL_DestroyTexture(*texture);
841  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
842  return -1;
843  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
844  return -1;
845  if (init_texture) {
846  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
847  return -1;
848  memset(pixels, 0, pitch * new_height);
849  SDL_UnlockTexture(*texture);
850  }
851  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
852  }
853  return 0;
854 }
855 
856 static void calculate_display_rect(SDL_Rect *rect,
857  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
858  int pic_width, int pic_height, AVRational pic_sar)
859 {
860  float aspect_ratio;
861  int width, height, x, y;
862 
863  if (pic_sar.num == 0)
864  aspect_ratio = 0;
865  else
866  aspect_ratio = av_q2d(pic_sar);
867 
868  if (aspect_ratio <= 0.0)
869  aspect_ratio = 1.0;
870  aspect_ratio *= (float)pic_width / (float)pic_height;
871 
872  /* XXX: we suppose the screen has a 1.0 pixel ratio */
873  height = scr_height;
874  width = lrint(height * aspect_ratio) & ~1;
875  if (width > scr_width) {
876  width = scr_width;
877  height = lrint(width / aspect_ratio) & ~1;
878  }
879  x = (scr_width - width) / 2;
880  y = (scr_height - height) / 2;
881  rect->x = scr_xleft + x;
882  rect->y = scr_ytop + y;
883  rect->w = FFMAX(width, 1);
884  rect->h = FFMAX(height, 1);
885 }
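/* Worked example: a 1920x1080 picture with square pixels shown in a 640x480
 * area. aspect_ratio = 16/9; height = 480 gives width = lrint(480 * 16/9) & ~1
 * = 852, which is wider than 640, so width = 640 and height = lrint(640 * 9/16)
 * & ~1 = 360. The rect ends up as 640x360, centered at y = (480 - 360) / 2 = 60. */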
886 
887 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
888 {
889  int i;
890  *sdl_blendmode = SDL_BLENDMODE_NONE;
891  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
892  if (format == AV_PIX_FMT_RGB32 ||
893  format == AV_PIX_FMT_RGB32_1 ||
894  format == AV_PIX_FMT_BGR32 ||
895  format == AV_PIX_FMT_BGR32_1)
896  *sdl_blendmode = SDL_BLENDMODE_BLEND;
897  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
898  if (format == sdl_texture_format_map[i].format) {
899  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
900  return;
901  }
902  }
903 }
904 
905 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
906  int ret = 0;
907  Uint32 sdl_pix_fmt;
908  SDL_BlendMode sdl_blendmode;
909  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
910  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
911  return -1;
912  switch (sdl_pix_fmt) {
913  case SDL_PIXELFORMAT_UNKNOWN:
914  /* This should only happen if we are not using avfilter... */
915  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
916  frame->width, frame->height, frame->format, frame->width, frame->height,
917  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
918  if (*img_convert_ctx != NULL) {
919  uint8_t *pixels[4];
920  int pitch[4];
921  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
922  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
923  0, frame->height, pixels, pitch);
924  SDL_UnlockTexture(*tex);
925  }
926  } else {
927  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
928  ret = -1;
929  }
930  break;
931  case SDL_PIXELFORMAT_IYUV:
932  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
933  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
934  frame->data[1], frame->linesize[1],
935  frame->data[2], frame->linesize[2]);
936  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
937  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
938  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
939  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
940  } else {
941  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
942  return -1;
943  }
944  break;
945  default:
946  if (frame->linesize[0] < 0) {
947  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
948  } else {
949  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
950  }
951  break;
952  }
953  return ret;
954 }
955 
956 static void video_image_display(VideoState *is)
957 {
958  Frame *vp;
959  Frame *sp = NULL;
960  SDL_Rect rect;
961 
962  vp = frame_queue_peek_last(&is->pictq);
963  if (is->subtitle_st) {
964  if (frame_queue_nb_remaining(&is->subpq) > 0) {
965  sp = frame_queue_peek(&is->subpq);
966 
967  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
968  if (!sp->uploaded) {
969  uint8_t* pixels[4];
970  int pitch[4];
971  int i;
972  if (!sp->width || !sp->height) {
973  sp->width = vp->width;
974  sp->height = vp->height;
975  }
976  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
977  return;
978 
979  for (i = 0; i < sp->sub.num_rects; i++) {
980  AVSubtitleRect *sub_rect = sp->sub.rects[i];
981 
982  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
983  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
984  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
985  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
986 
987  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
988  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
989  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
990  0, NULL, NULL, NULL);
991  if (!is->sub_convert_ctx) {
992  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
993  return;
994  }
995  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
996  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
997  0, sub_rect->h, pixels, pitch);
998  SDL_UnlockTexture(is->sub_texture);
999  }
1000  }
1001  sp->uploaded = 1;
1002  }
1003  } else
1004  sp = NULL;
1005  }
1006  }
1007 
1008  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1009 
1010  if (!vp->uploaded) {
1011  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1012  return;
1013  vp->uploaded = 1;
1014  vp->flip_v = vp->frame->linesize[0] < 0;
1015  }
1016 
1017  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1018  if (sp) {
1019 #if USE_ONEPASS_SUBTITLE_RENDER
1020  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1021 #else
1022  int i;
1023  double xratio = (double)rect.w / (double)sp->width;
1024  double yratio = (double)rect.h / (double)sp->height;
1025  for (i = 0; i < sp->sub.num_rects; i++) {
1026  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1027  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1028  .y = rect.y + sub_rect->y * yratio,
1029  .w = sub_rect->w * xratio,
1030  .h = sub_rect->h * yratio};
1031  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1032  }
1033 #endif
1034  }
1035 }
1036 
1037 static inline int compute_mod(int a, int b)
1038 {
1039  return a < 0 ? a%b + b : a%b;
1040 }
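/* In C the % operator keeps the sign of the dividend, so this wrapper maps
 * negative values back into [0, b): e.g. compute_mod(-3, 10) = -3 % 10 + 10 = 7. */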
1041 
1042 static void video_audio_display(VideoState *s)
1043 {
1044  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1045  int ch, channels, h, h2;
1046  int64_t time_diff;
1047  int rdft_bits, nb_freq;
1048 
1049  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1050  ;
1051  nb_freq = 1 << (rdft_bits - 1);
1052 
1053  /* compute display index : center on currently output samples */
1054  channels = s->audio_tgt.channels;
1055  nb_display_channels = channels;
1056  if (!s->paused) {
1057  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1058  n = 2 * channels;
1059  delay = s->audio_write_buf_size;
1060  delay /= n;
1061 
1062  /* to be more precise, we take into account the time spent since
1063  the last buffer computation */
1064  if (audio_callback_time) {
1065  time_diff = av_gettime_relative() - audio_callback_time;
1066  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1067  }
1068 
1069  delay += 2 * data_used;
1070  if (delay < data_used)
1071  delay = data_used;
1072 
1073  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1074  if (s->show_mode == SHOW_MODE_WAVES) {
1075  h = INT_MIN;
1076  for (i = 0; i < 1000; i += channels) {
1077  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1078  int a = s->sample_array[idx];
1079  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1080  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1081  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1082  int score = a - d;
1083  if (h < score && (b ^ c) < 0) {
1084  h = score;
1085  i_start = idx;
1086  }
1087  }
1088  }
1089 
1090  s->last_i_start = i_start;
1091  } else {
1092  i_start = s->last_i_start;
1093  }
1094 
1095  if (s->show_mode == SHOW_MODE_WAVES) {
1096  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1097 
1098  /* total height for one channel */
1099  h = s->height / nb_display_channels;
1100  /* graph height / 2 */
1101  h2 = (h * 9) / 20;
1102  for (ch = 0; ch < nb_display_channels; ch++) {
1103  i = i_start + ch;
1104  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1105  for (x = 0; x < s->width; x++) {
1106  y = (s->sample_array[i] * h2) >> 15;
1107  if (y < 0) {
1108  y = -y;
1109  ys = y1 - y;
1110  } else {
1111  ys = y1;
1112  }
1113  fill_rectangle(s->xleft + x, ys, 1, y);
1114  i += channels;
1115  if (i >= SAMPLE_ARRAY_SIZE)
1116  i -= SAMPLE_ARRAY_SIZE;
1117  }
1118  }
1119 
1120  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1121 
1122  for (ch = 1; ch < nb_display_channels; ch++) {
1123  y = s->ytop + ch * h;
1124  fill_rectangle(s->xleft, y, s->width, 1);
1125  }
1126  } else {
1127  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1128  return;
1129 
1130  nb_display_channels= FFMIN(nb_display_channels, 2);
1131  if (rdft_bits != s->rdft_bits) {
1132  av_rdft_end(s->rdft);
1133  av_free(s->rdft_data);
1134  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1135  s->rdft_bits = rdft_bits;
1136  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1137  }
1138  if (!s->rdft || !s->rdft_data){
1139  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1140  s->show_mode = SHOW_MODE_WAVES;
1141  } else {
1142  FFTSample *data[2];
1143  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1144  uint32_t *pixels;
1145  int pitch;
1146  for (ch = 0; ch < nb_display_channels; ch++) {
1147  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1148  i = i_start + ch;
1149  for (x = 0; x < 2 * nb_freq; x++) {
1150  double w = (x-nb_freq) * (1.0 / nb_freq);
1151  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1152  i += channels;
1153  if (i >= SAMPLE_ARRAY_SIZE)
1154  i -= SAMPLE_ARRAY_SIZE;
1155  }
1156  av_rdft_calc(s->rdft, data[ch]);
1157  }
1158  /* Least efficient way to do this, we should of course
1159  * directly access it but it is more than fast enough. */
1160  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1161  pitch >>= 2;
1162  pixels += pitch * s->height;
1163  for (y = 0; y < s->height; y++) {
1164  double w = 1 / sqrt(nb_freq);
1165  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1166  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1167  : a;
1168  a = FFMIN(a, 255);
1169  b = FFMIN(b, 255);
1170  pixels -= pitch;
1171  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1172  }
1173  SDL_UnlockTexture(s->vis_texture);
1174  }
1175  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1176  }
1177  if (!s->paused)
1178  s->xpos++;
1179  if (s->xpos >= s->width)
1180  s->xpos= s->xleft;
1181  }
1182 }
1183 
1184 static void stream_component_close(VideoState *is, int stream_index)
1185 {
1186  AVFormatContext *ic = is->ic;
1187  AVCodecParameters *codecpar;
1188 
1189  if (stream_index < 0 || stream_index >= ic->nb_streams)
1190  return;
1191  codecpar = ic->streams[stream_index]->codecpar;
1192 
1193  switch (codecpar->codec_type) {
1194  case AVMEDIA_TYPE_AUDIO:
1195  decoder_abort(&is->auddec, &is->sampq);
1196  SDL_CloseAudioDevice(audio_dev);
1197  decoder_destroy(&is->auddec);
1198  swr_free(&is->swr_ctx);
1199  av_freep(&is->audio_buf1);
1200  is->audio_buf1_size = 0;
1201  is->audio_buf = NULL;
1202 
1203  if (is->rdft) {
1204  av_rdft_end(is->rdft);
1205  av_freep(&is->rdft_data);
1206  is->rdft = NULL;
1207  is->rdft_bits = 0;
1208  }
1209  break;
1210  case AVMEDIA_TYPE_VIDEO:
1211  decoder_abort(&is->viddec, &is->pictq);
1212  decoder_destroy(&is->viddec);
1213  break;
1214  case AVMEDIA_TYPE_SUBTITLE:
1215  decoder_abort(&is->subdec, &is->subpq);
1216  decoder_destroy(&is->subdec);
1217  break;
1218  default:
1219  break;
1220  }
1221 
1222  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1223  switch (codecpar->codec_type) {
1224  case AVMEDIA_TYPE_AUDIO:
1225  is->audio_st = NULL;
1226  is->audio_stream = -1;
1227  break;
1228  case AVMEDIA_TYPE_VIDEO:
1229  is->video_st = NULL;
1230  is->video_stream = -1;
1231  break;
1232  case AVMEDIA_TYPE_SUBTITLE:
1233  is->subtitle_st = NULL;
1234  is->subtitle_stream = -1;
1235  break;
1236  default:
1237  break;
1238  }
1239 }
1240 
1241 static void stream_close(VideoState *is)
1242 {
1243  /* XXX: use a special url_shutdown call to abort parse cleanly */
1244  is->abort_request = 1;
1245  SDL_WaitThread(is->read_tid, NULL);
1246 
1247  /* close each stream */
1248  if (is->audio_stream >= 0)
1249  stream_component_close(is, is->audio_stream);
1250  if (is->video_stream >= 0)
1251  stream_component_close(is, is->video_stream);
1252  if (is->subtitle_stream >= 0)
1253  stream_component_close(is, is->subtitle_stream);
1254 
1255  avformat_close_input(&is->ic);
1256 
1257  packet_queue_destroy(&is->videoq);
1258  packet_queue_destroy(&is->audioq);
1259  packet_queue_destroy(&is->subtitleq);
1260 
1261  /* free all pictures */
1262  frame_queue_destory(&is->pictq);
1263  frame_queue_destory(&is->sampq);
1264  frame_queue_destory(&is->subpq);
1265  SDL_DestroyCond(is->continue_read_thread);
1266  sws_freeContext(is->img_convert_ctx);
1267  sws_freeContext(is->sub_convert_ctx);
1268  av_free(is->filename);
1269  if (is->vis_texture)
1270  SDL_DestroyTexture(is->vis_texture);
1271  if (is->vid_texture)
1272  SDL_DestroyTexture(is->vid_texture);
1273  if (is->sub_texture)
1274  SDL_DestroyTexture(is->sub_texture);
1275  av_free(is);
1276 }
1277 
1278 static void do_exit(VideoState *is)
1279 {
1280  if (is) {
1281  stream_close(is);
1282  }
1283  if (renderer)
1284  SDL_DestroyRenderer(renderer);
1285  if (window)
1286  SDL_DestroyWindow(window);
1287  av_lockmgr_register(NULL);
1288  uninit_opts();
1289 #if CONFIG_AVFILTER
1290  av_freep(&vfilters_list);
1291 #endif
1292  avformat_network_deinit();
1293  if (show_status)
1294  printf("\n");
1295  SDL_Quit();
1296  av_log(NULL, AV_LOG_QUIET, "%s", "");
1297  exit(0);
1298 }
1299 
1300 static void sigterm_handler(int sig)
1301 {
1302  exit(123);
1303 }
1304 
1305 static void set_default_window_size(int width, int height, AVRational sar)
1306 {
1307  SDL_Rect rect;
1308  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1309  default_width = rect.w;
1310  default_height = rect.h;
1311 }
1312 
1313 static int video_open(VideoState *is)
1314 {
1315  int w,h;
1316 
1317  if (screen_width) {
1318  w = screen_width;
1319  h = screen_height;
1320  } else {
1321  w = default_width;
1322  h = default_height;
1323  }
1324 
1325  if (!window_title)
1326  window_title = input_filename;
1327  SDL_SetWindowTitle(window, window_title);
1328 
1329  SDL_SetWindowSize(window, w, h);
1330  SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
1331  if (is_full_screen)
1332  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1333  SDL_ShowWindow(window);
1334 
1335  is->width = w;
1336  is->height = h;
1337 
1338  return 0;
1339 }
1340 
1341 /* display the current picture, if any */
1342 static void video_display(VideoState *is)
1343 {
1344  if (!is->width)
1345  video_open(is);
1346 
1347  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1348  SDL_RenderClear(renderer);
1349  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1350  video_audio_display(is);
1351  else if (is->video_st)
1352  video_image_display(is);
1353  SDL_RenderPresent(renderer);
1354 }
1355 
1356 static double get_clock(Clock *c)
1357 {
1358  if (*c->queue_serial != c->serial)
1359  return NAN;
1360  if (c->paused) {
1361  return c->pts;
1362  } else {
1363  double time = av_gettime_relative() / 1000000.0;
1364  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1365  }
1366 }
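/* Since pts_drift = pts - last_updated, the expression above simplifies to
 * pts + (time - last_updated) * speed: the clock advances with wall-clock
 * time scaled by its speed, with no periodic updates required. */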
1367 
1368 static void set_clock_at(Clock *c, double pts, int serial, double time)
1369 {
1370  c->pts = pts;
1371  c->last_updated = time;
1372  c->pts_drift = c->pts - time;
1373  c->serial = serial;
1374 }
1375 
1376 static void set_clock(Clock *c, double pts, int serial)
1377 {
1378  double time = av_gettime_relative() / 1000000.0;
1379  set_clock_at(c, pts, serial, time);
1380 }
1381 
1382 static void set_clock_speed(Clock *c, double speed)
1383 {
1384  set_clock(c, get_clock(c), c->serial);
1385  c->speed = speed;
1386 }
1387 
1388 static void init_clock(Clock *c, int *queue_serial)
1389 {
1390  c->speed = 1.0;
1391  c->paused = 0;
1392  c->queue_serial = queue_serial;
1393  set_clock(c, NAN, -1);
1394 }
1395 
1396 static void sync_clock_to_slave(Clock *c, Clock *slave)
1397 {
1398  double clock = get_clock(c);
1399  double slave_clock = get_clock(slave);
1400  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1401  set_clock(c, slave_clock, slave->serial);
1402 }
1403 
1404 static int get_master_sync_type(VideoState *is) {
1405  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1406  if (is->video_st)
1407  return AV_SYNC_VIDEO_MASTER;
1408  else
1409  return AV_SYNC_AUDIO_MASTER;
1410  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1411  if (is->audio_st)
1412  return AV_SYNC_AUDIO_MASTER;
1413  else
1414  return AV_SYNC_EXTERNAL_CLOCK;
1415  } else {
1416  return AV_SYNC_EXTERNAL_CLOCK;
1417  }
1418 }
1419 
1420 /* get the current master clock value */
1421 static double get_master_clock(VideoState *is)
1422 {
1423  double val;
1424 
1425  switch (get_master_sync_type(is)) {
1426  case AV_SYNC_VIDEO_MASTER:
1427  val = get_clock(&is->vidclk);
1428  break;
1429  case AV_SYNC_AUDIO_MASTER:
1430  val = get_clock(&is->audclk);
1431  break;
1432  default:
1433  val = get_clock(&is->extclk);
1434  break;
1435  }
1436  return val;
1437 }
1438 
1439 static void check_external_clock_speed(VideoState *is) {
1440  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1441  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1442  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1443  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1444  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1445  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1446  } else {
1447  double speed = is->extclk.speed;
1448  if (speed != 1.0)
1449  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1450  }
1451 }
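/* With the default constants this nudges the external clock by at most
 * EXTERNAL_CLOCK_SPEED_STEP (0.1%) per call, clamped to [0.900, 1.010]:
 * a starving packet queue slows playback slightly, an overfull one speeds it
 * up, and otherwise the speed decays back toward 1.0. */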
1452 
1453 /* seek in the stream */
1454 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1455 {
1456  if (!is->seek_req) {
1457  is->seek_pos = pos;
1458  is->seek_rel = rel;
1459  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1460  if (seek_by_bytes)
1461  is->seek_flags |= AVSEEK_FLAG_BYTE;
1462  is->seek_req = 1;
1463  SDL_CondSignal(is->continue_read_thread);
1464  }
1465 }
1466 
1467 /* pause or resume the video */
1468 static void stream_toggle_pause(VideoState *is)
1469 {
1470  if (is->paused) {
1471  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1472  if (is->read_pause_return != AVERROR(ENOSYS)) {
1473  is->vidclk.paused = 0;
1474  }
1475  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1476  }
1477  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1478  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1479 }
1480 
1481 static void toggle_pause(VideoState *is)
1482 {
1483  stream_toggle_pause(is);
1484  is->step = 0;
1485 }
1486 
1487 static void toggle_mute(VideoState *is)
1488 {
1489  is->muted = !is->muted;
1490 }
1491 
1492 static void update_volume(VideoState *is, int sign, double step)
1493 {
1494  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1495  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1496  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1497 }
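/* Example with SDL_MIX_MAXVOLUME = 128 and step = SDL_VOLUME_STEP (0.75 dB):
 * at full volume one step down gives lrint(128 * pow(10, -0.75 / 20)) = 117.
 * If the rounded result equals the current volume (at small volumes), the
 * "audio_volume + sign" fallback still guarantees a change of at least 1. */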
1498 
1499 static void step_to_next_frame(VideoState *is)
1500 {
1501  /* if the stream is paused unpause it, then step */
1502  if (is->paused)
1503  stream_toggle_pause(is);
1504  is->step = 1;
1505 }
1506 
1507 static double compute_target_delay(double delay, VideoState *is)
1508 {
1509  double sync_threshold, diff = 0;
1510 
1511  /* update delay to follow master synchronisation source */
1512  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1513  /* if video is slave, we try to correct big delays by
1514  duplicating or deleting a frame */
1515  diff = get_clock(&is->vidclk) - get_master_clock(is);
1516 
1517  /* skip or repeat frame. We take into account the
1518  delay to compute the threshold. I still don't know
1519  if it is the best guess */
1520  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1521  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1522  if (diff <= -sync_threshold)
1523  delay = FFMAX(0, delay + diff);
1524  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1525  delay = delay + diff;
1526  else if (diff >= sync_threshold)
1527  delay = 2 * delay;
1528  }
1529  }
1530 
1531  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1532  delay, -diff);
1533 
1534  return delay;
1535 }
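/* Example: nominal delay = 0.040 s, so sync_threshold = 0.04. If the video
 * clock is 0.10 s behind the master (diff = -0.10) the delay collapses to
 * FFMAX(0, 0.040 - 0.10) = 0 and the frame is shown immediately; if it is
 * 0.10 s ahead, the delay (being <= AV_SYNC_FRAMEDUP_THRESHOLD) is doubled
 * to 0.080 s, letting the master clock catch up. */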
1536 
1537 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1538  if (vp->serial == nextvp->serial) {
1539  double duration = nextvp->pts - vp->pts;
1540  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1541  return vp->duration;
1542  else
1543  return duration;
1544  } else {
1545  return 0.0;
1546  }
1547 }
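/* The duration of a picture is preferably the pts gap to the next queued
 * picture; the frame's own estimated duration is only used when that gap is
 * NaN, non-positive or larger than max_frame_duration, and a serial change
 * (seek) yields 0.0 so the new segment starts immediately. */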
1548 
1549 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1550  /* update current video pts */
1551  set_clock(&is->vidclk, pts, serial);
1552  sync_clock_to_slave(&is->extclk, &is->vidclk);
1553 }
1554 
1555 /* called to display each frame */
1556 static void video_refresh(void *opaque, double *remaining_time)
1557 {
1558  VideoState *is = opaque;
1559  double time;
1560 
1561  Frame *sp, *sp2;
1562 
1563  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1564  check_external_clock_speed(is);
1565 
1566  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1567  time = av_gettime_relative() / 1000000.0;
1568  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1569  video_display(is);
1570  is->last_vis_time = time;
1571  }
1572  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1573  }
1574 
1575  if (is->video_st) {
1576 retry:
1577  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1578  // nothing to do, no picture to display in the queue
1579  } else {
1580  double last_duration, duration, delay;
1581  Frame *vp, *lastvp;
1582 
1583  /* dequeue the picture */
1584  lastvp = frame_queue_peek_last(&is->pictq);
1585  vp = frame_queue_peek(&is->pictq);
1586 
1587  if (vp->serial != is->videoq.serial) {
1588  frame_queue_next(&is->pictq);
1589  goto retry;
1590  }
1591 
1592  if (lastvp->serial != vp->serial)
1593  is->frame_timer = av_gettime_relative() / 1000000.0;
1594 
1595  if (is->paused)
1596  goto display;
1597 
1598  /* compute nominal last_duration */
1599  last_duration = vp_duration(is, lastvp, vp);
1600  delay = compute_target_delay(last_duration, is);
1601 
1602  time= av_gettime_relative()/1000000.0;
1603  if (time < is->frame_timer + delay) {
1604  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1605  goto display;
1606  }
1607 
1608  is->frame_timer += delay;
1609  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1610  is->frame_timer = time;
1611 
1612  SDL_LockMutex(is->pictq.mutex);
1613  if (!isnan(vp->pts))
1614  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1615  SDL_UnlockMutex(is->pictq.mutex);
1616 
1617  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1618  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1619  duration = vp_duration(is, vp, nextvp);
1620  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1621  is->frame_drops_late++;
1622  frame_queue_next(&is->pictq);
1623  goto retry;
1624  }
1625  }
1626 
1627  if (is->subtitle_st) {
1628  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1629  sp = frame_queue_peek(&is->subpq);
1630 
1631  if (frame_queue_nb_remaining(&is->subpq) > 1)
1632  sp2 = frame_queue_peek_next(&is->subpq);
1633  else
1634  sp2 = NULL;
1635 
1636  if (sp->serial != is->subtitleq.serial
1637  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1638  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1639  {
1640  if (sp->uploaded) {
1641  int i;
1642  for (i = 0; i < sp->sub.num_rects; i++) {
1643  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1644  uint8_t *pixels;
1645  int pitch, j;
1646 
1647  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1648  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1649  memset(pixels, 0, sub_rect->w << 2);
1650  SDL_UnlockTexture(is->sub_texture);
1651  }
1652  }
1653  }
1654  frame_queue_next(&is->subpq);
1655  } else {
1656  break;
1657  }
1658  }
1659  }
1660 
1661  frame_queue_next(&is->pictq);
1662  is->force_refresh = 1;
1663 
1664  if (is->step && !is->paused)
1665  stream_toggle_pause(is);
1666  }
1667 display:
1668  /* display picture */
1669  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1670  video_display(is);
1671  }
1672  is->force_refresh = 0;
1673  if (show_status) {
1674  static int64_t last_time;
1675  int64_t cur_time;
1676  int aqsize, vqsize, sqsize;
1677  double av_diff;
1678 
1679  cur_time = av_gettime_relative();
1680  if (!last_time || (cur_time - last_time) >= 30000) {
1681  aqsize = 0;
1682  vqsize = 0;
1683  sqsize = 0;
1684  if (is->audio_st)
1685  aqsize = is->audioq.size;
1686  if (is->video_st)
1687  vqsize = is->videoq.size;
1688  if (is->subtitle_st)
1689  sqsize = is->subtitleq.size;
1690  av_diff = 0;
1691  if (is->audio_st && is->video_st)
1692  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1693  else if (is->video_st)
1694  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1695  else if (is->audio_st)
1696  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1697  av_log(NULL, AV_LOG_INFO,
1698  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1699  get_master_clock(is),
1700  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1701  av_diff,
1702  is->frame_drops_early + is->frame_drops_late,
1703  aqsize / 1024,
1704  vqsize / 1024,
1705  sqsize,
1706  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1707  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1708  fflush(stdout);
1709  last_time = cur_time;
1710  }
1711  }
1712 }
1713 
1714 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1715 {
1716  Frame *vp;
1717 
1718 #if defined(DEBUG_SYNC)
1719  printf("frame_type=%c pts=%0.3f\n",
1720  av_get_picture_type_char(src_frame->pict_type), pts);
1721 #endif
1722 
1723  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1724  return -1;
1725 
1726  vp->sar = src_frame->sample_aspect_ratio;
1727  vp->uploaded = 0;
1728 
1729  vp->width = src_frame->width;
1730  vp->height = src_frame->height;
1731  vp->format = src_frame->format;
1732 
1733  vp->pts = pts;
1734  vp->duration = duration;
1735  vp->pos = pos;
1736  vp->serial = serial;
1737 
1738  set_default_window_size(vp->width, vp->height, vp->sar);
1739 
1740  av_frame_move_ref(vp->frame, src_frame);
1741  frame_queue_push(&is->pictq);
1742  return 0;
1743 }
1744 
1745 static int get_video_frame(VideoState *is, AVFrame *frame)
1746 {
1747  int got_picture;
1748 
1749  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1750  return -1;
1751 
1752  if (got_picture) {
1753  double dpts = NAN;
1754 
1755  if (frame->pts != AV_NOPTS_VALUE)
1756  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1757 
1758  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1759 
1760  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1761  if (frame->pts != AV_NOPTS_VALUE) {
1762  double diff = dpts - get_master_clock(is);
1763  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1764  diff - is->frame_last_filter_delay < 0 &&
1765  is->viddec.pkt_serial == is->vidclk.serial &&
1766  is->videoq.nb_packets) {
1767  is->frame_drops_early++;
1768  av_frame_unref(frame);
1769  got_picture = 0;
1770  }
1771  }
1772  }
1773  }
1774 
1775  return got_picture;
1776 }
1777 
1778 #if CONFIG_AVFILTER
1779 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1780  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1781 {
1782  int ret, i;
1783  int nb_filters = graph->nb_filters;
1784  AVFilterInOut *outputs = NULL, *inputs = NULL;
1785 
1786  if (filtergraph) {
1787  outputs = avfilter_inout_alloc();
1788  inputs = avfilter_inout_alloc();
1789  if (!outputs || !inputs) {
1790  ret = AVERROR(ENOMEM);
1791  goto fail;
1792  }
1793 
1794  outputs->name = av_strdup("in");
1795  outputs->filter_ctx = source_ctx;
1796  outputs->pad_idx = 0;
1797  outputs->next = NULL;
1798 
1799  inputs->name = av_strdup("out");
1800  inputs->filter_ctx = sink_ctx;
1801  inputs->pad_idx = 0;
1802  inputs->next = NULL;
1803 
1804  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1805  goto fail;
1806  } else {
1807  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1808  goto fail;
1809  }
1810 
1811  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1812  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1813  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1814 
1815  ret = avfilter_graph_config(graph, NULL);
1816 fail:
1817  avfilter_inout_free(&outputs);
1818  avfilter_inout_free(&inputs);
1819  return ret;
1820 }
1821 
1822 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1823 {
1824  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1825  char sws_flags_str[512] = "";
1826  char buffersrc_args[256];
1827  int ret;
1828  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1829  AVCodecParameters *codecpar = is->video_st->codecpar;
1830  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1831  AVDictionaryEntry *e = NULL;
1832  int nb_pix_fmts = 0;
1833  int i, j;
1834 
1835  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1836  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1837  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1838  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1839  break;
1840  }
1841  }
1842  }
1843  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1844 
1845  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1846  if (!strcmp(e->key, "sws_flags")) {
1847  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1848  } else
1849  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1850  }
1851  if (strlen(sws_flags_str))
1852  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1853 
1854  graph->scale_sws_opts = av_strdup(sws_flags_str);
1855 
1856  snprintf(buffersrc_args, sizeof(buffersrc_args),
1857  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1858  frame->width, frame->height, frame->format,
1859  is->video_st->time_base.num, is->video_st->time_base.den,
1860  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1861  if (fr.num && fr.den)
1862  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1863 
1864  if ((ret = avfilter_graph_create_filter(&filt_src,
1865  avfilter_get_by_name("buffer"),
1866  "ffplay_buffer", buffersrc_args, NULL,
1867  graph)) < 0)
1868  goto fail;
1869 
1870  ret = avfilter_graph_create_filter(&filt_out,
1871  avfilter_get_by_name("buffersink"),
1872  "ffplay_buffersink", NULL, NULL, graph);
1873  if (ret < 0)
1874  goto fail;
1875 
1876  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1877  goto fail;
1878 
1879  last_filter = filt_out;
1880 
1881 /* Note: this macro adds a filter before the lastly added filter, so the
1882  * processing order of the filters is in reverse */
1883 #define INSERT_FILT(name, arg) do { \
1884  AVFilterContext *filt_ctx; \
1885  \
1886  ret = avfilter_graph_create_filter(&filt_ctx, \
1887  avfilter_get_by_name(name), \
1888  "ffplay_" name, arg, NULL, graph); \
1889  if (ret < 0) \
1890  goto fail; \
1891  \
1892  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1893  if (ret < 0) \
1894  goto fail; \
1895  \
1896  last_filter = filt_ctx; \
1897 } while (0)
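/* e.g. INSERT_FILT("transpose", "clock") instantiates a "transpose" filter
 * named "ffplay_transpose" with the argument "clock" and links it in front of
 * the previously added filter; because filters are inserted back to front,
 * the last INSERT_FILT call is the first one applied to the frames. */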
1898 
1899  if (autorotate) {
1900  double theta = get_rotation(is->video_st);
1901 
1902  if (fabs(theta - 90) < 1.0) {
1903  INSERT_FILT("transpose", "clock");
1904  } else if (fabs(theta - 180) < 1.0) {
1905  INSERT_FILT("hflip", NULL);
1906  INSERT_FILT("vflip", NULL);
1907  } else if (fabs(theta - 270) < 1.0) {
1908  INSERT_FILT("transpose", "cclock");
1909  } else if (fabs(theta) > 1.0) {
1910  char rotate_buf[64];
1911  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1912  INSERT_FILT("rotate", rotate_buf);
1913  }
1914  }
1915 
1916  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1917  goto fail;
1918 
1919  is->in_video_filter = filt_src;
1920  is->out_video_filter = filt_out;
1921 
1922 fail:
1923  return ret;
1924 }
1925 
1926 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1927 {
1928  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1929  int sample_rates[2] = { 0, -1 };
1930  int64_t channel_layouts[2] = { 0, -1 };
1931  int channels[2] = { 0, -1 };
1932  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1933  char aresample_swr_opts[512] = "";
1934  AVDictionaryEntry *e = NULL;
1935  char asrc_args[256];
1936  int ret;
1937 
1938  avfilter_graph_free(&is->agraph);
1939  if (!(is->agraph = avfilter_graph_alloc()))
1940  return AVERROR(ENOMEM);
1941 
1942  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1943  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1944  if (strlen(aresample_swr_opts))
1945  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1946  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1947 
1948  ret = snprintf(asrc_args, sizeof(asrc_args),
1949  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1950  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1951  is->audio_filter_src.channels,
1952  1, is->audio_filter_src.freq);
1953  if (is->audio_filter_src.channel_layout)
1954  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1955  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1956 
1957  ret = avfilter_graph_create_filter(&filt_asrc,
1958  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1959  asrc_args, NULL, is->agraph);
1960  if (ret < 0)
1961  goto end;
1962 
1963 
1964  ret = avfilter_graph_create_filter(&filt_asink,
1965  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1966  NULL, NULL, is->agraph);
1967  if (ret < 0)
1968  goto end;
1969 
1970  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1971  goto end;
1972  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1973  goto end;
1974 
1975  if (force_output_format) {
1976  channel_layouts[0] = is->audio_tgt.channel_layout;
1977  channels [0] = is->audio_tgt.channels;
1978  sample_rates [0] = is->audio_tgt.freq;
1979  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1980  goto end;
1981  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1982  goto end;
1983  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1984  goto end;
1985  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1986  goto end;
1987  }
1988 
1989 
1990  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1991  goto end;
1992 
1993  is->in_audio_filter = filt_asrc;
1994  is->out_audio_filter = filt_asink;
1995 
1996 end:
1997  if (ret < 0)
1998  avfilter_graph_free(&is->agraph);
1999  return ret;
2000 }
2001 #endif /* CONFIG_AVFILTER */
2002 
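/* Audio decoder thread: pulls decoded frames from the audio decoder, pushes them
 * through the audio filtergraph when CONFIG_AVFILTER is enabled (rebuilding the
 * graph whenever the source format, channel layout, sample rate or packet serial
 * changes), and queues the resulting frames into is->sampq for the SDL callback. */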
2003 static int audio_thread(void *arg)
2004 {
2005  VideoState *is = arg;
2006  AVFrame *frame = av_frame_alloc();
2007  Frame *af;
2008 #if CONFIG_AVFILTER
2009  int last_serial = -1;
2010  int64_t dec_channel_layout;
2011  int reconfigure;
2012 #endif
2013  int got_frame = 0;
2014  AVRational tb;
2015  int ret = 0;
2016 
2017  if (!frame)
2018  return AVERROR(ENOMEM);
2019 
2020  do {
2021  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2022  goto the_end;
2023 
2024  if (got_frame) {
2025  tb = (AVRational){1, frame->sample_rate};
2026 
2027 #if CONFIG_AVFILTER
2028  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2029 
2030  reconfigure =
2031  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2032  frame->format, frame->channels) ||
2033  is->audio_filter_src.channel_layout != dec_channel_layout ||
2034  is->audio_filter_src.freq != frame->sample_rate ||
2035  is->auddec.pkt_serial != last_serial;
2036 
2037  if (reconfigure) {
2038  char buf1[1024], buf2[1024];
2039  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2040  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2041  av_log(NULL, AV_LOG_DEBUG,
2042  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2043  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2044  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2045 
2046  is->audio_filter_src.fmt = frame->format;
2047  is->audio_filter_src.channels = frame->channels;
2048  is->audio_filter_src.channel_layout = dec_channel_layout;
2049  is->audio_filter_src.freq = frame->sample_rate;
2050  last_serial = is->auddec.pkt_serial;
2051 
2052  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2053  goto the_end;
2054  }
2055 
2056  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2057  goto the_end;
2058 
2059  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2060  tb = av_buffersink_get_time_base(is->out_audio_filter);
2061 #endif
2062  if (!(af = frame_queue_peek_writable(&is->sampq)))
2063  goto the_end;
2064 
2065  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2066  af->pos = frame->pkt_pos;
2067  af->serial = is->auddec.pkt_serial;
2068  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2069 
2070  av_frame_move_ref(af->frame, frame);
2071  frame_queue_push(&is->sampq);
2072 
2073 #if CONFIG_AVFILTER
2074  if (is->audioq.serial != is->auddec.pkt_serial)
2075  break;
2076  }
2077  if (ret == AVERROR_EOF)
2078  is->auddec.finished = is->auddec.pkt_serial;
2079 #endif
2080  }
2081  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2082  the_end:
2083 #if CONFIG_AVFILTER
2084  avfilter_graph_free(&is->agraph);
2085 #endif
2086  av_frame_free(&frame);
2087  return ret;
2088 }
2089 
2090 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2091 {
2092  packet_queue_start(d->queue);
2093  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2094  if (!d->decoder_tid) {
2095  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2096  return AVERROR(ENOMEM);
2097  }
2098  return 0;
2099 }
2100 
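/* Video decoder thread: reads decoded frames via get_video_frame(), reconfigures
 * the video filtergraph whenever the frame size, pixel format, packet serial or
 * selected video filter changes, and hands filtered frames to queue_picture()
 * together with their pts and a per-frame duration derived from the filter
 * output frame rate. */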
2101 static int video_thread(void *arg)
2102 {
2103  VideoState *is = arg;
2104  AVFrame *frame = av_frame_alloc();
2105  double pts;
2106  double duration;
2107  int ret;
2108  AVRational tb = is->video_st->time_base;
2109  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2110 
2111 #if CONFIG_AVFILTER
2112  AVFilterGraph *graph = avfilter_graph_alloc();
2113  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2114  int last_w = 0;
2115  int last_h = 0;
2116  enum AVPixelFormat last_format = -2;
2117  int last_serial = -1;
2118  int last_vfilter_idx = 0;
2119  if (!graph) {
2120  av_frame_free(&frame);
2121  return AVERROR(ENOMEM);
2122  }
2123 
2124 #endif
2125 
2126  if (!frame) {
2127 #if CONFIG_AVFILTER
2128  avfilter_graph_free(&graph);
2129 #endif
2130  return AVERROR(ENOMEM);
2131  }
2132 
2133  for (;;) {
2134  ret = get_video_frame(is, frame);
2135  if (ret < 0)
2136  goto the_end;
2137  if (!ret)
2138  continue;
2139 
2140 #if CONFIG_AVFILTER
2141  if ( last_w != frame->width
2142  || last_h != frame->height
2143  || last_format != frame->format
2144  || last_serial != is->viddec.pkt_serial
2145  || last_vfilter_idx != is->vfilter_idx) {
2146  av_log(NULL, AV_LOG_DEBUG,
2147  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2148  last_w, last_h,
2149  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2150  frame->width, frame->height,
2151  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2152  avfilter_graph_free(&graph);
2153  graph = avfilter_graph_alloc();
2154  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2155  SDL_Event event;
2156  event.type = FF_QUIT_EVENT;
2157  event.user.data1 = is;
2158  SDL_PushEvent(&event);
2159  goto the_end;
2160  }
2161  filt_in = is->in_video_filter;
2162  filt_out = is->out_video_filter;
2163  last_w = frame->width;
2164  last_h = frame->height;
2165  last_format = frame->format;
2166  last_serial = is->viddec.pkt_serial;
2167  last_vfilter_idx = is->vfilter_idx;
2168  frame_rate = av_buffersink_get_frame_rate(filt_out);
2169  }
2170 
2171  ret = av_buffersrc_add_frame(filt_in, frame);
2172  if (ret < 0)
2173  goto the_end;
2174 
2175  while (ret >= 0) {
2176  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2177 
2178  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2179  if (ret < 0) {
2180  if (ret == AVERROR_EOF)
2181  is->viddec.finished = is->viddec.pkt_serial;
2182  ret = 0;
2183  break;
2184  }
2185 
2186  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2187  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2188  is->frame_last_filter_delay = 0;
2189  tb = av_buffersink_get_time_base(filt_out);
2190 #endif
2191  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2192  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2193  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2194  av_frame_unref(frame);
2195 #if CONFIG_AVFILTER
2196  }
2197 #endif
2198 
2199  if (ret < 0)
2200  goto the_end;
2201  }
2202  the_end:
2203 #if CONFIG_AVFILTER
2204  avfilter_graph_free(&graph);
2205 #endif
2206  av_frame_free(&frame);
2207  return 0;
2208 }
2209 
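/* Subtitle decoder thread: decodes subtitles into pre-allocated entries of
 * is->subpq and publishes them with frame_queue_push(); decoded subtitles that
 * are not bitmap subtitles (sub.format != 0) are freed immediately. */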
2210 static int subtitle_thread(void *arg)
2211 {
2212  VideoState *is = arg;
2213  Frame *sp;
2214  int got_subtitle;
2215  double pts;
2216 
2217  for (;;) {
2218  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2219  return 0;
2220 
2221  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2222  break;
2223 
2224  pts = 0;
2225 
2226  if (got_subtitle && sp->sub.format == 0) {
2227  if (sp->sub.pts != AV_NOPTS_VALUE)
2228  pts = sp->sub.pts / (double)AV_TIME_BASE;
2229  sp->pts = pts;
2230  sp->serial = is->subdec.pkt_serial;
2231  sp->width = is->subdec.avctx->width;
2232  sp->height = is->subdec.avctx->height;
2233  sp->uploaded = 0;
2234 
2235  /* now we can update the picture count */
2236  frame_queue_push(&is->subpq);
2237  } else if (got_subtitle) {
2238  avsubtitle_free(&sp->sub);
2239  }
2240  }
2241  return 0;
2242 }
2243 
2244 /* copy samples for viewing in editor window */
2245 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2246 {
2247  int size, len;
2248 
2249  size = samples_size / sizeof(short);
2250  while (size > 0) {
2251  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2252  if (len > size)
2253  len = size;
2254  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2255  samples += len;
2256  is->sample_array_index += len;
2257  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2258  is->sample_array_index = 0;
2259  size -= len;
2260  }
2261 }
2262 
2263 /* return the wanted number of samples to get better sync if sync_type is video
2264  * or external master clock */
2265 static int synchronize_audio(VideoState *is, int nb_samples)
2266 {
2267  int wanted_nb_samples = nb_samples;
2268 
2269  /* if not master, then we try to remove or add samples to correct the clock */
2270  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2271  double diff, avg_diff;
2272  int min_nb_samples, max_nb_samples;
2273 
2274  diff = get_clock(&is->audclk) - get_master_clock(is);
2275 
2276  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2277  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2278  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2279  /* not enough measures to have a correct estimate */
2280  is->audio_diff_avg_count++;
2281  } else {
2282  /* estimate the A-V difference */
2283  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2284 
2285  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2286  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2287  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2288  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2289  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2290  }
2291  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2292  diff, avg_diff, wanted_nb_samples - nb_samples,
2293  is->audio_clock, is->audio_diff_threshold);
2294  }
2295  } else {
2296  /* too big difference : may be initial PTS errors, so
2297  reset A-V filter */
2298  is->audio_diff_avg_count = 0;
2299  is->audio_diff_cum = 0;
2300  }
2301  }
2302 
2303  return wanted_nb_samples;
2304 }
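/* The averaging above is an exponentially weighted sum: audio_diff_cum holds
 * sum(coef^k * diff_k) with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794,
 * so multiplying by (1 - coef) yields a weighted mean in which a measurement
 * AUDIO_DIFF_AVG_NB callbacks old only contributes about 1%. The resulting
 * correction is clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the
 * frame's sample count. */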
2305 
2306 /**
2307  * Decode one audio frame and return its uncompressed size.
2308  *
2309  * The processed audio frame is decoded, converted if required, and
2310  * stored in is->audio_buf, with size in bytes given by the return
2311  * value.
2312  */
2313  static int audio_decode_frame(VideoState *is)
2314  {
2315  int data_size, resampled_data_size;
2316  int64_t dec_channel_layout;
2317  av_unused double audio_clock0;
2318  int wanted_nb_samples;
2319  Frame *af;
2320 
2321  if (is->paused)
2322  return -1;
2323 
2324  do {
2325 #if defined(_WIN32)
2326  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2327  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2328  return -1;
2329  av_usleep (1000);
2330  }
2331 #endif
2332  if (!(af = frame_queue_peek_readable(&is->sampq)))
2333  return -1;
2334  frame_queue_next(&is->sampq);
2335  } while (af->serial != is->audioq.serial);
2336 
2337  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2338  af->frame->nb_samples,
2339  af->frame->format, 1);
2340 
2341  dec_channel_layout =
2342  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2343  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2344  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2345 
2346  if (af->frame->format != is->audio_src.fmt ||
2347  dec_channel_layout != is->audio_src.channel_layout ||
2348  af->frame->sample_rate != is->audio_src.freq ||
2349  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2350  swr_free(&is->swr_ctx);
2351  is->swr_ctx = swr_alloc_set_opts(NULL,
2352  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2353  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2354  0, NULL);
2355  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2356  av_log(NULL, AV_LOG_ERROR,
2357  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2358  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2359  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2360  swr_free(&is->swr_ctx);
2361  return -1;
2362  }
2363  is->audio_src.channel_layout = dec_channel_layout;
2364  is->audio_src.channels = af->frame->channels;
2365  is->audio_src.freq = af->frame->sample_rate;
2366  is->audio_src.fmt = af->frame->format;
2367  }
2368 
2369  if (is->swr_ctx) {
2370  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2371  uint8_t **out = &is->audio_buf1;
2372  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2373  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2374  int len2;
2375  if (out_size < 0) {
2376  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2377  return -1;
2378  }
2379  if (wanted_nb_samples != af->frame->nb_samples) {
2380  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2381  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2382  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2383  return -1;
2384  }
2385  }
2386  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2387  if (!is->audio_buf1)
2388  return AVERROR(ENOMEM);
2389  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2390  if (len2 < 0) {
2391  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2392  return -1;
2393  }
2394  if (len2 == out_count) {
2395  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2396  if (swr_init(is->swr_ctx) < 0)
2397  swr_free(&is->swr_ctx);
2398  }
2399  is->audio_buf = is->audio_buf1;
2400  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2401  } else {
2402  is->audio_buf = af->frame->data[0];
2403  resampled_data_size = data_size;
2404  }
2405 
2406  audio_clock0 = is->audio_clock;
2407  /* update the audio clock with the pts */
2408  if (!isnan(af->pts))
2409  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2410  else
2411  is->audio_clock = NAN;
2412  is->audio_clock_serial = af->serial;
2413 #ifdef DEBUG
2414  {
2415  static double last_clock;
2416  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2417  is->audio_clock - last_clock,
2418  is->audio_clock, audio_clock0);
2419  last_clock = is->audio_clock;
2420  }
2421 #endif
2422  return resampled_data_size;
2423 }
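/* Note: is->audio_clock is set to the pts of the *end* of the decoded frame
 * (pts + nb_samples / sample_rate); sdl_audio_callback() later subtracts the
 * amount of data still buffered to derive the currently audible position. */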
2424 
2425 /* prepare a new audio buffer */
2426 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2427 {
2428  VideoState *is = opaque;
2429  int audio_size, len1;
2430 
2431  audio_callback_time = av_gettime_relative();
2432 
2433  while (len > 0) {
2434  if (is->audio_buf_index >= is->audio_buf_size) {
2435  audio_size = audio_decode_frame(is);
2436  if (audio_size < 0) {
2437  /* if error, just output silence */
2438  is->audio_buf = NULL;
2439  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2440  } else {
2441  if (is->show_mode != SHOW_MODE_VIDEO)
2442  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2443  is->audio_buf_size = audio_size;
2444  }
2445  is->audio_buf_index = 0;
2446  }
2447  len1 = is->audio_buf_size - is->audio_buf_index;
2448  if (len1 > len)
2449  len1 = len;
2450  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2451  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2452  else {
2453  memset(stream, 0, len1);
2454  if (!is->muted && is->audio_buf)
2455  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2456  }
2457  len -= len1;
2458  stream += len1;
2459  is->audio_buf_index += len1;
2460  }
2460  }
2461  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2462  /* Let's assume the audio driver that is used by SDL has two periods. */
2463  if (!isnan(is->audio_clock)) {
2464  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2465  sync_clock_to_slave(&is->extclk, &is->audclk);
2466  }
2467 }
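/* The clock update above assumes two hardware periods worth of audio
 * (2 * audio_hw_buf_size) plus the unread part of the current buffer
 * (audio_write_buf_size) are still queued ahead of the speaker, and backdates
 * is->audio_clock by that many bytes converted to seconds via bytes_per_sec. */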
2468 
2469 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2470 {
2471  SDL_AudioSpec wanted_spec, spec;
2472  const char *env;
2473  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2474  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2475  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2476 
2477  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2478  if (env) {
2479  wanted_nb_channels = atoi(env);
2480  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2481  }
2482  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2483  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2484  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2485  }
2486  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2487  wanted_spec.channels = wanted_nb_channels;
2488  wanted_spec.freq = wanted_sample_rate;
2489  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2490  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2491  return -1;
2492  }
2493  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2494  next_sample_rate_idx--;
2495  wanted_spec.format = AUDIO_S16SYS;
2496  wanted_spec.silence = 0;
2497  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2498  wanted_spec.callback = sdl_audio_callback;
2499  wanted_spec.userdata = opaque;
2500  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2501  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2502  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2503  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2504  if (!wanted_spec.channels) {
2505  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2506  wanted_spec.channels = wanted_nb_channels;
2507  if (!wanted_spec.freq) {
2508  av_log(NULL, AV_LOG_ERROR,
2509  "No more combinations to try, audio open failed\n");
2510  return -1;
2511  }
2512  }
2513  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2514  }
2515  if (spec.format != AUDIO_S16SYS) {
2516  av_log(NULL, AV_LOG_ERROR,
2517  "SDL advised audio format %d is not supported!\n", spec.format);
2518  return -1;
2519  }
2520  if (spec.channels != wanted_spec.channels) {
2521  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2522  if (!wanted_channel_layout) {
2523  av_log(NULL, AV_LOG_ERROR,
2524  "SDL advised channel count %d is not supported!\n", spec.channels);
2525  return -1;
2526  }
2527  }
2528 
2529  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2530  audio_hw_params->freq = spec.freq;
2531  audio_hw_params->channel_layout = wanted_channel_layout;
2532  audio_hw_params->channels = spec.channels;
2533  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2534  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2535  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2536  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2537  return -1;
2538  }
2539  return spec.size;
2540 }
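/* audio_open() negotiates with SDL: if the requested configuration is refused it
 * falls back through next_nb_channels[] to alternative channel counts and, once
 * those are exhausted, retries with the next lower entry of next_sample_rates[],
 * giving up only when both tables run out. The obtained spec is then copied into
 * audio_hw_params. */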
2541 
2542 /* open a given stream. Return 0 if OK */
2543 static int stream_component_open(VideoState *is, int stream_index)
2544 {
2545  AVFormatContext *ic = is->ic;
2546  AVCodecContext *avctx;
2547  AVCodec *codec;
2548  const char *forced_codec_name = NULL;
2549  AVDictionary *opts = NULL;
2550  AVDictionaryEntry *t = NULL;
2551  int sample_rate, nb_channels;
2552  int64_t channel_layout;
2553  int ret = 0;
2554  int stream_lowres = lowres;
2555 
2556  if (stream_index < 0 || stream_index >= ic->nb_streams)
2557  return -1;
2558 
2559  avctx = avcodec_alloc_context3(NULL);
2560  if (!avctx)
2561  return AVERROR(ENOMEM);
2562 
2563  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2564  if (ret < 0)
2565  goto fail;
2566  av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
2567 
2568  codec = avcodec_find_decoder(avctx->codec_id);
2569 
2570  switch(avctx->codec_type){
2571  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2572  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2573  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2574  }
2575  if (forced_codec_name)
2576  codec = avcodec_find_decoder_by_name(forced_codec_name);
2577  if (!codec) {
2578  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2579  "No codec could be found with name '%s'\n", forced_codec_name);
2580  else av_log(NULL, AV_LOG_WARNING,
2581  "No codec could be found with id %d\n", avctx->codec_id);
2582  ret = AVERROR(EINVAL);
2583  goto fail;
2584  }
2585 
2586  avctx->codec_id = codec->id;
2587  if(stream_lowres > av_codec_get_max_lowres(codec)){
2588  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2589  av_codec_get_max_lowres(codec));
2590  stream_lowres = av_codec_get_max_lowres(codec);
2591  }
2592  av_codec_set_lowres(avctx, stream_lowres);
2593 
2594 #if FF_API_EMU_EDGE
2595  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2596 #endif
2597  if (fast)
2598  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2599 #if FF_API_EMU_EDGE
2600  if(codec->capabilities & AV_CODEC_CAP_DR1)
2601  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2602 #endif
2603 
2604  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2605  if (!av_dict_get(opts, "threads", NULL, 0))
2606  av_dict_set(&opts, "threads", "auto", 0);
2607  if (stream_lowres)
2608  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2609  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2610  av_dict_set(&opts, "refcounted_frames", "1", 0);
2611  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2612  goto fail;
2613  }
2614  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2615  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2616  ret = AVERROR_OPTION_NOT_FOUND;
2617  goto fail;
2618  }
2619 
2620  is->eof = 0;
2621  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2622  switch (avctx->codec_type) {
2623  case AVMEDIA_TYPE_AUDIO:
2624 #if CONFIG_AVFILTER
2625  {
2626  AVFilterContext *sink;
2627 
2628  is->audio_filter_src.freq = avctx->sample_rate;
2629  is->audio_filter_src.channels = avctx->channels;
2630  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2631  is->audio_filter_src.fmt = avctx->sample_fmt;
2632  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2633  goto fail;
2634  sink = is->out_audio_filter;
2635  sample_rate = av_buffersink_get_sample_rate(sink);
2636  nb_channels = av_buffersink_get_channels(sink);
2637  channel_layout = av_buffersink_get_channel_layout(sink);
2638  }
2639 #else
2640  sample_rate = avctx->sample_rate;
2641  nb_channels = avctx->channels;
2642  channel_layout = avctx->channel_layout;
2643 #endif
2644 
2645  /* prepare audio output */
2646  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2647  goto fail;
2648  is->audio_hw_buf_size = ret;
2649  is->audio_src = is->audio_tgt;
2650  is->audio_buf_size = 0;
2651  is->audio_buf_index = 0;
2652 
2653  /* init averaging filter */
2654  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2655  is->audio_diff_avg_count = 0;
2656  /* since we do not have a precise enough audio FIFO fullness,
2657  we correct audio sync only if larger than this threshold */
2658  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2659 
2660  is->audio_stream = stream_index;
2661  is->audio_st = ic->streams[stream_index];
2662 
2663  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2664  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2665  is->auddec.start_pts = is->audio_st->start_time;
2666  is->auddec.start_pts_tb = is->audio_st->time_base;
2667  }
2668  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2669  goto out;
2670  SDL_PauseAudioDevice(audio_dev, 0);
2671  break;
2672  case AVMEDIA_TYPE_VIDEO:
2673  is->video_stream = stream_index;
2674  is->video_st = ic->streams[stream_index];
2675 
2676  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2677  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2678  goto out;
2679  is->queue_attachments_req = 1;
2680  break;
2681  case AVMEDIA_TYPE_SUBTITLE:
2682  is->subtitle_stream = stream_index;
2683  is->subtitle_st = ic->streams[stream_index];
2684 
2685  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2686  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2687  goto out;
2688  break;
2689  default:
2690  break;
2691  }
2692  goto out;
2693 
2694 fail:
2695  avcodec_free_context(&avctx);
2696 out:
2697  av_dict_free(&opts);
2698 
2699  return ret;
2700 }
2701 
2702 static int decode_interrupt_cb(void *ctx)
2703 {
2704  VideoState *is = ctx;
2705  return is->abort_request;
2706 }
2707 
2708 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2709  return stream_id < 0 ||
2710  queue->abort_request ||
2711  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2712  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2713 }
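/* A queue counts as "full enough" once it holds more than MIN_FRAMES packets and,
 * when packet durations are known, more than one second of payload; aborted
 * queues, attached pictures and missing streams are treated as satisfied so they
 * never block the demuxer. */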
2714 
2715  static int is_realtime(AVFormatContext *s)
2716  {
2717  if( !strcmp(s->iformat->name, "rtp")
2718  || !strcmp(s->iformat->name, "rtsp")
2719  || !strcmp(s->iformat->name, "sdp")
2720  )
2721  return 1;
2722 
2723  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2724  || !strncmp(s->filename, "udp:", 4)
2725  )
2726  )
2727  return 1;
2728  return 0;
2729 }
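/* read_thread() below disables the MAX_QUEUE_SIZE/stream_has_enough_packets
 * throttling for such realtime inputs: with the default -infbuf setting (-1),
 * infinite_buffer is forced to 1 so network data is never left unread. */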
2730 
2731 /* this thread gets the stream from the disk or the network */
2732 static int read_thread(void *arg)
2733 {
2734  VideoState *is = arg;
2735  AVFormatContext *ic = NULL;
2736  int err, i, ret;
2737  int st_index[AVMEDIA_TYPE_NB];
2738  AVPacket pkt1, *pkt = &pkt1;
2739  int64_t stream_start_time;
2740  int pkt_in_play_range = 0;
2741  AVDictionaryEntry *t;
2742  SDL_mutex *wait_mutex = SDL_CreateMutex();
2743  int scan_all_pmts_set = 0;
2744  int64_t pkt_ts;
2745 
2746  if (!wait_mutex) {
2747  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2748  ret = AVERROR(ENOMEM);
2749  goto fail;
2750  }
2751 
2752  memset(st_index, -1, sizeof(st_index));
2753  is->last_video_stream = is->video_stream = -1;
2754  is->last_audio_stream = is->audio_stream = -1;
2755  is->last_subtitle_stream = is->subtitle_stream = -1;
2756  is->eof = 0;
2757 
2758  ic = avformat_alloc_context();
2759  if (!ic) {
2760  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2761  ret = AVERROR(ENOMEM);
2762  goto fail;
2763  }
2764  ic->interrupt_callback.callback = decode_interrupt_cb;
2765  ic->interrupt_callback.opaque = is;
2766  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2767  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2768  scan_all_pmts_set = 1;
2769  }
2770  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2771  if (err < 0) {
2772  print_error(is->filename, err);
2773  ret = -1;
2774  goto fail;
2775  }
2776  if (scan_all_pmts_set)
2777  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2778 
2779  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2780  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2781  ret = AVERROR_OPTION_NOT_FOUND;
2782  goto fail;
2783  }
2784  is->ic = ic;
2785 
2786  if (genpts)
2787  ic->flags |= AVFMT_FLAG_GENPTS;
2788 
2789  av_format_inject_global_side_data(ic);
2790 
2791  if (find_stream_info) {
2792  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2793  int orig_nb_streams = ic->nb_streams;
2794 
2795  err = avformat_find_stream_info(ic, opts);
2796 
2797  for (i = 0; i < orig_nb_streams; i++)
2798  av_dict_free(&opts[i]);
2799  av_freep(&opts);
2800 
2801  if (err < 0) {
2802  av_log(NULL, AV_LOG_WARNING,
2803  "%s: could not find codec parameters\n", is->filename);
2804  ret = -1;
2805  goto fail;
2806  }
2807  }
2808 
2809  if (ic->pb)
2810  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2811 
2812  if (seek_by_bytes < 0)
2813  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2814 
2815  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2816 
2817  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2818  window_title = av_asprintf("%s - %s", t->value, input_filename);
2819 
2820  /* if seeking requested, we execute it */
2821  if (start_time != AV_NOPTS_VALUE) {
2822  int64_t timestamp;
2823 
2824  timestamp = start_time;
2825  /* add the stream start time */
2826  if (ic->start_time != AV_NOPTS_VALUE)
2827  timestamp += ic->start_time;
2828  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2829  if (ret < 0) {
2830  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2831  is->filename, (double)timestamp / AV_TIME_BASE);
2832  }
2833  }
2834 
2835  is->realtime = is_realtime(ic);
2836 
2837  if (show_status)
2838  av_dump_format(ic, 0, is->filename, 0);
2839 
2840  for (i = 0; i < ic->nb_streams; i++) {
2841  AVStream *st = ic->streams[i];
2842  enum AVMediaType type = st->codecpar->codec_type;
2843  st->discard = AVDISCARD_ALL;
2844  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2845  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2846  st_index[type] = i;
2847  }
2848  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2849  if (wanted_stream_spec[i] && st_index[i] == -1) {
2850  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2851  st_index[i] = INT_MAX;
2852  }
2853  }
2854 
2855  if (!video_disable)
2856  st_index[AVMEDIA_TYPE_VIDEO] =
2857  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2858  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2859  if (!audio_disable)
2860  st_index[AVMEDIA_TYPE_AUDIO] =
2861  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2862  st_index[AVMEDIA_TYPE_AUDIO],
2863  st_index[AVMEDIA_TYPE_VIDEO],
2864  NULL, 0);
2865  if (!video_disable && !subtitle_disable)
2866  st_index[AVMEDIA_TYPE_SUBTITLE] =
2867  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2868  st_index[AVMEDIA_TYPE_SUBTITLE],
2869  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2870  st_index[AVMEDIA_TYPE_AUDIO] :
2871  st_index[AVMEDIA_TYPE_VIDEO]),
2872  NULL, 0);
2873 
2874  is->show_mode = show_mode;
2875  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2876  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2877  AVCodecParameters *codecpar = st->codecpar;
2878  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2879  if (codecpar->width)
2880  set_default_window_size(codecpar->width, codecpar->height, sar);
2881  }
2882 
2883  /* open the streams */
2884  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2885  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2886  }
2887 
2888  ret = -1;
2889  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2890  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2891  }
2892  if (is->show_mode == SHOW_MODE_NONE)
2893  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2894 
2895  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2896  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2897  }
2898 
2899  if (is->video_stream < 0 && is->audio_stream < 0) {
2900  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2901  is->filename);
2902  ret = -1;
2903  goto fail;
2904  }
2905 
2906  if (infinite_buffer < 0 && is->realtime)
2907  infinite_buffer = 1;
2908 
2909  for (;;) {
2910  if (is->abort_request)
2911  break;
2912  if (is->paused != is->last_paused) {
2913  is->last_paused = is->paused;
2914  if (is->paused)
2915  is->read_pause_return = av_read_pause(ic);
2916  else
2917  av_read_play(ic);
2918  }
2919 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2920  if (is->paused &&
2921  (!strcmp(ic->iformat->name, "rtsp") ||
2922  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2923  /* wait 10 ms to avoid trying to get another packet */
2924  /* XXX: horrible */
2925  SDL_Delay(10);
2926  continue;
2927  }
2928 #endif
2929  if (is->seek_req) {
2930  int64_t seek_target = is->seek_pos;
2931  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2932  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2933 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2934 // of the seek_pos/seek_rel variables
2935 
2936  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2937  if (ret < 0) {
2938  av_log(NULL, AV_LOG_ERROR,
2939  "%s: error while seeking\n", is->ic->filename);
2940  } else {
2941  if (is->audio_stream >= 0) {
2942  packet_queue_flush(&is->audioq);
2943  packet_queue_put(&is->audioq, &flush_pkt);
2944  }
2945  if (is->subtitle_stream >= 0) {
2946  packet_queue_flush(&is->subtitleq);
2947  packet_queue_put(&is->subtitleq, &flush_pkt);
2948  }
2949  if (is->video_stream >= 0) {
2950  packet_queue_flush(&is->videoq);
2951  packet_queue_put(&is->videoq, &flush_pkt);
2952  }
2953  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2954  set_clock(&is->extclk, NAN, 0);
2955  } else {
2956  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2957  }
2958  }
2959  is->seek_req = 0;
2960  is->queue_attachments_req = 1;
2961  is->eof = 0;
2962  if (is->paused)
2963  step_to_next_frame(is);
2964  }
2965  if (is->queue_attachments_req) {
2966  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2967  AVPacket copy = { 0 };
2968  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2969  goto fail;
2970  packet_queue_put(&is->videoq, &copy);
2971  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2972  }
2973  is->queue_attachments_req = 0;
2974  }
2975 
2976  /* if the queue are full, no need to read more */
2977  if (infinite_buffer<1 &&
2978  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2979  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2980  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2981  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2982  /* wait 10 ms */
2983  SDL_LockMutex(wait_mutex);
2984  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2985  SDL_UnlockMutex(wait_mutex);
2986  continue;
2987  }
2988  if (!is->paused &&
2989  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2990  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2991  if (loop != 1 && (!loop || --loop)) {
2992  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2993  } else if (autoexit) {
2994  ret = AVERROR_EOF;
2995  goto fail;
2996  }
2997  }
2998  ret = av_read_frame(ic, pkt);
2999  if (ret < 0) {
3000  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3001  if (is->video_stream >= 0)
3002  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3003  if (is->audio_stream >= 0)
3004  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3005  if (is->subtitle_stream >= 0)
3006  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3007  is->eof = 1;
3008  }
3009  if (ic->pb && ic->pb->error)
3010  break;
3011  SDL_LockMutex(wait_mutex);
3012  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3013  SDL_UnlockMutex(wait_mutex);
3014  continue;
3015  } else {
3016  is->eof = 0;
3017  }
3018  /* check if packet is in play range specified by user, then queue, otherwise discard */
3019  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3020  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3021  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3022  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3023  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3024  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3025  <= ((double)duration / 1000000);
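/* pkt_in_play_range converts the packet timestamp to seconds relative to the
 * stream start, subtracts the -ss start offset (given in AV_TIME_BASE units),
 * and keeps the packet only while that lies within the -t duration, if one was
 * specified. */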
3026  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3027  packet_queue_put(&is->audioq, pkt);
3028  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3029  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3030  packet_queue_put(&is->videoq, pkt);
3031  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3032  packet_queue_put(&is->subtitleq, pkt);
3033  } else {
3034  av_packet_unref(pkt);
3035  }
3036  }
3037 
3038  ret = 0;
3039  fail:
3040  if (ic && !is->ic)
3041  avformat_close_input(&ic);
3042 
3043  if (ret != 0) {
3044  SDL_Event event;
3045 
3046  event.type = FF_QUIT_EVENT;
3047  event.user.data1 = is;
3048  SDL_PushEvent(&event);
3049  }
3050  SDL_DestroyMutex(wait_mutex);
3051  return 0;
3052 }
3053 
3054 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3055 {
3056  VideoState *is;
3057 
3058  is = av_mallocz(sizeof(VideoState));
3059  if (!is)
3060  return NULL;
3061  is->filename = av_strdup(filename);
3062  if (!is->filename)
3063  goto fail;
3064  is->iformat = iformat;
3065  is->ytop = 0;
3066  is->xleft = 0;
3067 
3068  /* start video display */
3069  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3070  goto fail;
3071  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3072  goto fail;
3073  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3074  goto fail;
3075 
3076  if (packet_queue_init(&is->videoq) < 0 ||
3077  packet_queue_init(&is->audioq) < 0 ||
3078  packet_queue_init(&is->subtitleq) < 0)
3079  goto fail;
3080 
3081  if (!(is->continue_read_thread = SDL_CreateCond())) {
3082  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3083  goto fail;
3084  }
3085 
3086  init_clock(&is->vidclk, &is->videoq.serial);
3087  init_clock(&is->audclk, &is->audioq.serial);
3088  init_clock(&is->extclk, &is->extclk.serial);
3089  is->audio_clock_serial = -1;
3090  if (startup_volume < 0)
3091  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3092  if (startup_volume > 100)
3093  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3094  startup_volume = av_clip(startup_volume, 0, 100);
3095  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3096  is->audio_volume = startup_volume;
3097  is->muted = 0;
3098  is->av_sync_type = av_sync_type;
3099  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3100  if (!is->read_tid) {
3101  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3102 fail:
3103  stream_close(is);
3104  return NULL;
3105  }
3106  return is;
3107 }
3108 
3109  static void stream_cycle_channel(VideoState *is, int codec_type)
3110  {
3111  AVFormatContext *ic = is->ic;
3112  int start_index, stream_index;
3113  int old_index;
3114  AVStream *st;
3115  AVProgram *p = NULL;
3116  int nb_streams = is->ic->nb_streams;
3117 
3118  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3119  start_index = is->last_video_stream;
3120  old_index = is->video_stream;
3121  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3122  start_index = is->last_audio_stream;
3123  old_index = is->audio_stream;
3124  } else {
3125  start_index = is->last_subtitle_stream;
3126  old_index = is->subtitle_stream;
3127  }
3128  stream_index = start_index;
3129 
3130  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3131  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3132  if (p) {
3133  nb_streams = p->nb_stream_indexes;
3134  for (start_index = 0; start_index < nb_streams; start_index++)
3135  if (p->stream_index[start_index] == stream_index)
3136  break;
3137  if (start_index == nb_streams)
3138  start_index = -1;
3139  stream_index = start_index;
3140  }
3141  }
3142 
3143  for (;;) {
3144  if (++stream_index >= nb_streams)
3145  {
3146  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3147  {
3148  stream_index = -1;
3149  is->last_subtitle_stream = -1;
3150  goto the_end;
3151  }
3152  if (start_index == -1)
3153  return;
3154  stream_index = 0;
3155  }
3156  if (stream_index == start_index)
3157  return;
3158  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3159  if (st->codecpar->codec_type == codec_type) {
3160  /* check that parameters are OK */
3161  switch (codec_type) {
3162  case AVMEDIA_TYPE_AUDIO:
3163  if (st->codecpar->sample_rate != 0 &&
3164  st->codecpar->channels != 0)
3165  goto the_end;
3166  break;
3167  case AVMEDIA_TYPE_VIDEO:
3168  case AVMEDIA_TYPE_SUBTITLE:
3169  goto the_end;
3170  default:
3171  break;
3172  }
3173  }
3174  }
3175  the_end:
3176  if (p && stream_index != -1)
3177  stream_index = p->stream_index[stream_index];
3178  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3179  av_get_media_type_string(codec_type),
3180  old_index,
3181  stream_index);
3182 
3183  stream_component_close(is, old_index);
3184  stream_component_open(is, stream_index);
3185 }
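/* Note: when a video stream is active, stream_cycle_channel() restricts the
 * search to streams of the program containing that video stream
 * (av_find_program_from_stream); audio candidates are additionally required to
 * have a non-zero sample rate and channel count. */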
3186 
3187 
3188  static void toggle_full_screen(VideoState *is)
3189  {
3190  is_full_screen = !is_full_screen;
3191  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3192 }
3193 
3194  static void toggle_audio_display(VideoState *is)
3195  {
3196  int next = is->show_mode;
3197  do {
3198  next = (next + 1) % SHOW_MODE_NB;
3199  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3200  if (is->show_mode != next) {
3201  is->force_refresh = 1;
3202  is->show_mode = next;
3203  }
3204 }
3205 
3206 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3207  double remaining_time = 0.0;
3208  SDL_PumpEvents();
3209  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3210  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3211  SDL_ShowCursor(0);
3212  cursor_hidden = 1;
3213  }
3214  if (remaining_time > 0.0)
3215  av_usleep((int64_t)(remaining_time * 1000000.0));
3216  remaining_time = REFRESH_RATE;
3217  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3218  video_refresh(is, &remaining_time);
3219  SDL_PumpEvents();
3220  }
3221 }
3222 
3223 static void seek_chapter(VideoState *is, int incr)
3224 {
3225  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3226  int i;
3227 
3228  if (!is->ic->nb_chapters)
3229  return;
3230 
3231  /* find the current chapter */
3232  for (i = 0; i < is->ic->nb_chapters; i++) {
3233  AVChapter *ch = is->ic->chapters[i];
3234  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3235  i--;
3236  break;
3237  }
3238  }
3239 
3240  i += incr;
3241  i = FFMAX(i, 0);
3242  if (i >= is->ic->nb_chapters)
3243  return;
3244 
3245  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3246  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3247  AV_TIME_BASE_Q), 0, 0);
3248 }
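/* seek_chapter() locates the chapter containing the current master clock
 * position by comparing timestamps with av_compare_ts(), steps incr chapters
 * forward or backward, and issues a stream_seek() to that chapter's start
 * rescaled to AV_TIME_BASE_Q. */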
3249 
3250 /* handle an event sent by the GUI */
3251 static void event_loop(VideoState *cur_stream)
3252 {
3253  SDL_Event event;
3254  double incr, pos, frac;
3255 
3256  for (;;) {
3257  double x;
3258  refresh_loop_wait_event(cur_stream, &event);
3259  switch (event.type) {
3260  case SDL_KEYDOWN:
3261  if (exit_on_keydown) {
3262  do_exit(cur_stream);
3263  break;
3264  }
3265  switch (event.key.keysym.sym) {
3266  case SDLK_ESCAPE:
3267  case SDLK_q:
3268  do_exit(cur_stream);
3269  break;
3270  case SDLK_f:
3271  toggle_full_screen(cur_stream);
3272  cur_stream->force_refresh = 1;
3273  break;
3274  case SDLK_p:
3275  case SDLK_SPACE:
3276  toggle_pause(cur_stream);
3277  break;
3278  case SDLK_m:
3279  toggle_mute(cur_stream);
3280  break;
3281  case SDLK_KP_MULTIPLY:
3282  case SDLK_0:
3283  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3284  break;
3285  case SDLK_KP_DIVIDE:
3286  case SDLK_9:
3287  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3288  break;
3289  case SDLK_s: // S: Step to next frame
3290  step_to_next_frame(cur_stream);
3291  break;
3292  case SDLK_a:
3293  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3294  break;
3295  case SDLK_v:
3296  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3297  break;
3298  case SDLK_c:
3299  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3300  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3301  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3302  break;
3303  case SDLK_t:
3304  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3305  break;
3306  case SDLK_w:
3307 #if CONFIG_AVFILTER
3308  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3309  if (++cur_stream->vfilter_idx >= nb_vfilters)
3310  cur_stream->vfilter_idx = 0;
3311  } else {
3312  cur_stream->vfilter_idx = 0;
3313  toggle_audio_display(cur_stream);
3314  }
3315 #else
3316  toggle_audio_display(cur_stream);
3317 #endif
3318  break;
3319  case SDLK_PAGEUP:
3320  if (cur_stream->ic->nb_chapters <= 1) {
3321  incr = 600.0;
3322  goto do_seek;
3323  }
3324  seek_chapter(cur_stream, 1);
3325  break;
3326  case SDLK_PAGEDOWN:
3327  if (cur_stream->ic->nb_chapters <= 1) {
3328  incr = -600.0;
3329  goto do_seek;
3330  }
3331  seek_chapter(cur_stream, -1);
3332  break;
3333  case SDLK_LEFT:
3334  incr = -10.0;
3335  goto do_seek;
3336  case SDLK_RIGHT:
3337  incr = 10.0;
3338  goto do_seek;
3339  case SDLK_UP:
3340  incr = 60.0;
3341  goto do_seek;
3342  case SDLK_DOWN:
3343  incr = -60.0;
3344  do_seek:
3345  if (seek_by_bytes) {
3346  pos = -1;
3347  if (pos < 0 && cur_stream->video_stream >= 0)
3348  pos = frame_queue_last_pos(&cur_stream->pictq);
3349  if (pos < 0 && cur_stream->audio_stream >= 0)
3350  pos = frame_queue_last_pos(&cur_stream->sampq);
3351  if (pos < 0)
3352  pos = avio_tell(cur_stream->ic->pb);
3353  if (cur_stream->ic->bit_rate)
3354  incr *= cur_stream->ic->bit_rate / 8.0;
3355  else
3356  incr *= 180000.0;
3357  pos += incr;
3358  stream_seek(cur_stream, pos, incr, 1);
3359  } else {
3360  pos = get_master_clock(cur_stream);
3361  if (isnan(pos))
3362  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3363  pos += incr;
3364  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3365  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3366  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3367  }
3368  break;
3369  default:
3370  break;
3371  }
3372  break;
3373  case SDL_MOUSEBUTTONDOWN:
3374  if (exit_on_mousedown) {
3375  do_exit(cur_stream);
3376  break;
3377  }
3378  if (event.button.button == SDL_BUTTON_LEFT) {
3379  static int64_t last_mouse_left_click = 0;
3380  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3381  toggle_full_screen(cur_stream);
3382  cur_stream->force_refresh = 1;
3383  last_mouse_left_click = 0;
3384  } else {
3385  last_mouse_left_click = av_gettime_relative();
3386  }
3387  }
3388  case SDL_MOUSEMOTION:
3389  if (cursor_hidden) {
3390  SDL_ShowCursor(1);
3391  cursor_hidden = 0;
3392  }
3393  cursor_last_shown = av_gettime_relative();
3394  if (event.type == SDL_MOUSEBUTTONDOWN) {
3395  if (event.button.button != SDL_BUTTON_RIGHT)
3396  break;
3397  x = event.button.x;
3398  } else {
3399  if (!(event.motion.state & SDL_BUTTON_RMASK))
3400  break;
3401  x = event.motion.x;
3402  }
3403  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3404  uint64_t size = avio_size(cur_stream->ic->pb);
3405  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3406  } else {
3407  int64_t ts;
3408  int ns, hh, mm, ss;
3409  int tns, thh, tmm, tss;
3410  tns = cur_stream->ic->duration / 1000000LL;
3411  thh = tns / 3600;
3412  tmm = (tns % 3600) / 60;
3413  tss = (tns % 60);
3414  frac = x / cur_stream->width;
3415  ns = frac * tns;
3416  hh = ns / 3600;
3417  mm = (ns % 3600) / 60;
3418  ss = (ns % 60);
3419  av_log(NULL, AV_LOG_INFO,
3420  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3421  hh, mm, ss, thh, tmm, tss);
3422  ts = frac * cur_stream->ic->duration;
3423  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3424  ts += cur_stream->ic->start_time;
3425  stream_seek(cur_stream, ts, 0, 0);
3426  }
3427  break;
3428  case SDL_WINDOWEVENT:
3429  switch (event.window.event) {
3430  case SDL_WINDOWEVENT_RESIZED:
3431  screen_width = cur_stream->width = event.window.data1;
3432  screen_height = cur_stream->height = event.window.data2;
3433  if (cur_stream->vis_texture) {
3434  SDL_DestroyTexture(cur_stream->vis_texture);
3435  cur_stream->vis_texture = NULL;
3436  }
3437  case SDL_WINDOWEVENT_EXPOSED:
3438  cur_stream->force_refresh = 1;
3439  }
3440  break;
3441  case SDL_QUIT:
3442  case FF_QUIT_EVENT:
3443  do_exit(cur_stream);
3444  break;
3445  default:
3446  break;
3447  }
3448  }
3449 }
3450 
3451 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3452 {
3453  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3454  return opt_default(NULL, "video_size", arg);
3455 }
3456 
3457 static int opt_width(void *optctx, const char *opt, const char *arg)
3458 {
3459  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3460  return 0;
3461 }
3462 
3463 static int opt_height(void *optctx, const char *opt, const char *arg)
3464 {
3465  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3466  return 0;
3467 }
3468 
3469 static int opt_format(void *optctx, const char *opt, const char *arg)
3470 {
3471  file_iformat = av_find_input_format(arg);
3472  if (!file_iformat) {
3473  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3474  return AVERROR(EINVAL);
3475  }
3476  return 0;
3477 }
3478 
3479 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3480 {
3481  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3482  return opt_default(NULL, "pixel_format", arg);
3483 }
3484 
3485 static int opt_sync(void *optctx, const char *opt, const char *arg)
3486 {
3487  if (!strcmp(arg, "audio"))
3488  av_sync_type = AV_SYNC_AUDIO_MASTER;
3489  else if (!strcmp(arg, "video"))
3490  av_sync_type = AV_SYNC_VIDEO_MASTER;
3491  else if (!strcmp(arg, "ext"))
3492  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3493  else {
3494  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3495  exit(1);
3496  }
3497  return 0;
3498 }
3499 
3500 static int opt_seek(void *optctx, const char *opt, const char *arg)
3501 {
3502  start_time = parse_time_or_die(opt, arg, 1);
3503  return 0;
3504 }
3505 
3506 static int opt_duration(void *optctx, const char *opt, const char *arg)
3507 {
3508  duration = parse_time_or_die(opt, arg, 1);
3509  return 0;
3510 }
3511 
3512 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3513 {
3514  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3515  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3516  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3517  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3518  return 0;
3519 }
3520 
3521 static void opt_input_file(void *optctx, const char *filename)
3522 {
3523  if (input_filename) {
3524  av_log(NULL, AV_LOG_FATAL,
3525  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3526  filename, input_filename);
3527  exit(1);
3528  }
3529  if (!strcmp(filename, "-"))
3530  filename = "pipe:";
3531  input_filename = filename;
3532 }
3533 
3534 static int opt_codec(void *optctx, const char *opt, const char *arg)
3535 {
3536  const char *spec = strchr(opt, ':');
3537  if (!spec) {
3538  av_log(NULL, AV_LOG_ERROR,
3539  "No media specifier was specified in '%s' in option '%s'\n",
3540  arg, opt);
3541  return AVERROR(EINVAL);
3542  }
3543  spec++;
3544  switch (spec[0]) {
3545  case 'a' : audio_codec_name = arg; break;
3546  case 's' : subtitle_codec_name = arg; break;
3547  case 'v' : video_codec_name = arg; break;
3548  default:
3549  av_log(NULL, AV_LOG_ERROR,
3550  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3551  return AVERROR(EINVAL);
3552  }
3553  return 0;
3554 }
3555 
3556 static int dummy;
3557 
3558 static const OptionDef options[] = {
3559  CMDUTILS_COMMON_OPTIONS
3560  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3561  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3562  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3563  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3564  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3565  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3566  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3567  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3568  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3569  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3570  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3571  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3572  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3573  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3574  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3575  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3576  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3577  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3578  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3579  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3580  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3581  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3582  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3583  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3584  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3585  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3586  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3587  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3588  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3589  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3590  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3591 #if CONFIG_AVFILTER
3592  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3593  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3594 #endif
3595  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3596  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3597  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3598  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3599  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3600  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3601  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3602  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3603  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3604  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3605  "read and decode the streams to fill missing information with heuristics" },
3606  { NULL, },
3607 };
3608 
3609 static void show_usage(void)
3610 {
3611  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3612  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3613  av_log(NULL, AV_LOG_INFO, "\n");
3614 }
3615 
3616 void show_help_default(const char *opt, const char *arg)
3617 {
3618  av_log_set_callback(log_callback_help);
3619  show_usage();
3620  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3621  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3622  printf("\n");
3623  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3624  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3625 #if !CONFIG_AVFILTER
3626  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3627 #else
3628  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3629 #endif
3630  printf("\nWhile playing:\n"
3631  "q, ESC quit\n"
3632  "f toggle full screen\n"
3633  "p, SPC pause\n"
3634  "m toggle mute\n"
3635  "9, 0 decrease and increase volume respectively\n"
3636  "/, * decrease and increase volume respectively\n"
3637  "a cycle audio channel in the current program\n"
3638  "v cycle video channel\n"
3639  "t cycle subtitle channel in the current program\n"
3640  "c cycle program\n"
3641  "w cycle video filters or show modes\n"
3642  "s activate frame-step mode\n"
3643  "left/right seek backward/forward 10 seconds\n"
3644  "down/up seek backward/forward 1 minute\n"
3645  "page down/page up seek backward/forward 10 minutes\n"
3646  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3647  "left double-click toggle full screen\n"
3648  );
3649 }
3650 
3651 static int lockmgr(void **mtx, enum AVLockOp op)
3652 {
3653  switch(op) {
3654  case AV_LOCK_CREATE:
3655  *mtx = SDL_CreateMutex();
3656  if(!*mtx) {
3657  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3658  return 1;
3659  }
3660  return 0;
3661  case AV_LOCK_OBTAIN:
3662  return !!SDL_LockMutex(*mtx);
3663  case AV_LOCK_RELEASE:
3664  return !!SDL_UnlockMutex(*mtx);
3665  case AV_LOCK_DESTROY:
3666  SDL_DestroyMutex(*mtx);
3667  return 0;
3668  }
3669  return 1;
3670 }
3671 
3672 /* Called from the main */
3673 int main(int argc, char **argv)
3674 {
3675  int flags;
3676  VideoState *is;
3677 
3678  init_dynload();
3679 
3680  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3681  parse_loglevel(argc, argv, options);
3682 
3683  /* register all codecs, demux and protocols */
3684 #if CONFIG_AVDEVICE
3685  avdevice_register_all();
3686 #endif
3687 #if CONFIG_AVFILTER
3688  avfilter_register_all();
3689 #endif
3690  av_register_all();
3691  avformat_network_init();
3692 
3693  init_opts();
3694 
3695  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3696  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3697 
3698  show_banner(argc, argv, options);
3699 
3700  parse_options(NULL, argc, argv, options, opt_input_file);
3701 
3702  if (!input_filename) {
3703  show_usage();
3704  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3706  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3707  exit(1);
3708  }
3709 
3710  if (display_disable) {
3711  video_disable = 1;
3712  }
3713  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3714  if (audio_disable)
3715  flags &= ~SDL_INIT_AUDIO;
3716  else {
3717  /* Try to work around an occasional ALSA buffer underflow issue when the
3718  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3719  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3720  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3721  }
3722  if (display_disable)
3723  flags &= ~SDL_INIT_VIDEO;
3724  if (SDL_Init (flags)) {
3725  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3726  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3727  exit(1);
3728  }
3729 
3730  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3731  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3732 
3734  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3735  do_exit(NULL);
3736  }
3737 
3738  av_init_packet(&flush_pkt);
3739  flush_pkt.data = (uint8_t *)&flush_pkt;
3740 
3741  if (!display_disable) {
3742  int flags = SDL_WINDOW_HIDDEN;
3743  if (borderless)
3744  flags |= SDL_WINDOW_BORDERLESS;
3745  else
3746  flags |= SDL_WINDOW_RESIZABLE;
3747  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3748  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3749  if (window) {
3750  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3751  if (!renderer) {
3752  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3753  renderer = SDL_CreateRenderer(window, -1, 0);
3754  }
3755  if (renderer) {
3756  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3757  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3758  }
3759  }
3760  if (!window || !renderer || !renderer_info.num_texture_formats) {
3761  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3762  do_exit(NULL);
3763  }
3764  }
3765 
3766  is = stream_open(input_filename, file_iformat);
3767  if (!is) {
3768  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3769  do_exit(NULL);
3770  }
3771 
3772  event_loop(is);
3773 
3774  /* never returns */
3775 
3776  return 0;
3777 }
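The window/renderer setup in main() first requests a hardware-accelerated, vsync-enabled SDL renderer and falls back to a default renderer when that fails. Below is a minimal, standalone sketch of that fallback pattern; it assumes SDL2, and the window title, size, and logging are placeholders rather than ffplay's own values.

#include <SDL.h>
#include <stdio.h>

int main(void)
{
    SDL_Window   *window;
    SDL_Renderer *renderer = NULL;
    SDL_RendererInfo info  = { 0 };

    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }

    window = SDL_CreateWindow("renderer fallback demo",
                              SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (window) {
        /* Prefer an accelerated renderer with vsync... */
        renderer = SDL_CreateRenderer(window, -1,
                                      SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
        if (!renderer) {
            fprintf(stderr, "accelerated renderer unavailable: %s\n", SDL_GetError());
            /* ...then fall back to whatever SDL can provide. */
            renderer = SDL_CreateRenderer(window, -1, 0);
        }
        if (renderer && !SDL_GetRendererInfo(renderer, &info))
            fprintf(stderr, "using %s renderer\n", info.name);
    }

    if (!window || !renderer) {
        fprintf(stderr, "could not create window or renderer: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    SDL_DestroyRenderer(renderer);
    SDL_DestroyWindow(window);
    SDL_Quit();
    return 0;
}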
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1556
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
AVFilterContext ** filters
Definition: avfilter.h:857
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:518
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3512
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:119
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:772
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:82
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:365
static void video_image_display(VideoState *is)
Definition: ffplay.c:956
#define NULL
Definition: coverity.c:32
Clock audclk
Definition: ffplay.c:219
const char const char void * val
Definition: avisynth_c.h:771
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:492
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:493
const char * s
Definition: avisynth_c.h:768
int width
Definition: ffplay.c:292
#define OPT_EXPERT
Definition: cmdutils.h:168
static double get_clock(Clock *c)
Definition: ffplay.c:1356
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
enum AVSampleFormat fmt
Definition: ffplay.c:138
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3463
static const struct TextureFormatEntry sdl_texture_format_map[]
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:64
static void copy(const float *p1, float *p2, const int length)
SDL_cond * cond
Definition: ffplay.c:178
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3498
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2469
FrameQueue pictq
Definition: ffplay.c:223
Decoder auddec
Definition: ffplay.c:227
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
AVStream * subtitle_st
Definition: ffplay.c:277
This structure describes decoded (raw) audio or video data.
Definition: frame.h:201
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:129
static SDL_Renderer * renderer
Definition: ffplay.c:363
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4094
#define SWS_BICUBIC
Definition: swscale.h:60
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1605
double rdftspeed
Definition: ffplay.c:343
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
double frame_timer
Definition: ffplay.c:280
static AVInputFormat * file_iformat
Definition: ffplay.c:310
#define OPT_VIDEO
Definition: cmdutils.h:170
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3521
double get_rotation(AVStream *st)
Definition: cmdutils.c:2098
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:473
int av_lockmgr_register(int(*cb)(void **mutex, enum AVLockOp op))
Register a user provided lock manager supporting the operations specified by AVLockOp.
Definition: utils.c:1960
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3469
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:80
Unlock the mutex.
Definition: avcodec.h:6334
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVRational next_pts_tb
Definition: ffplay.c:199
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1404
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:64
int rindex
Definition: ffplay.c:171
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:84
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int default_height
Definition: ffplay.c:314
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:282
FrameQueue sampq
Definition: ffplay.c:225
enum VideoState::ShowMode show_mode
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4228
int seek_flags
Definition: ffplay.c:212
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1043
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:709
int serial
Definition: ffplay.c:124
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4913
static int64_t cur_time
Definition: ffserver.c:252
#define OPT_AUDIO
Definition: cmdutils.h:171
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3109
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3479
int size
Definition: avcodec.h:1680
const char * b
Definition: vf_curves.c:113
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1468
#define DEBUG
Definition: vf_framerate.c:29
MyAVPacketList * first_pkt
Definition: ffplay.c:119
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1454
int av_log2(unsigned v)
Definition: intmath.c:26
static int seek_by_bytes
Definition: ffplay.c:321
double audio_diff_cum
Definition: ffplay.c:237
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:100
AVInputFormat * iformat
Definition: ffplay.c:205
enum AVMediaType codec_type
Definition: rtp.c:37
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1451
AVCodecContext * avctx
Definition: ffplay.c:191
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1150
int paused
Definition: ffplay.c:208
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3534
static AVStream * video_stream
static unsigned sws_flags
Definition: ffplay.c:110
int abort_request
Definition: ffplay.c:123
AVS_VideoFrame int int int int new_height
Definition: avisynth_c.h:818
unsigned num_rects
Definition: avcodec.h:4132
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1368
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1481
int out_size
Definition: movenc.c:55
double audio_diff_threshold
Definition: ffplay.c:239
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:531
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:222
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
discard all
Definition: avcodec.h:830
int64_t channel_layout
Definition: ffplay.c:137
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
static int audio_disable
Definition: ffplay.c:317
AVStream * audio_st
Definition: ffplay.c:241
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1002
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:368
static const char * audio_codec_name
Definition: ffplay.c:340
#define fn(a)
int serial
Definition: ffplay.c:157
AVCodec.
Definition: avcodec.h:3739
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3223
double pts_drift
Definition: ffplay.c:145
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:231
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2002
This struct describes the properties of an encoded stream.
Definition: avcodec.h:4144
AVLockOp
Lock operation used by lockmgr.
Definition: avcodec.h:6331
int width
Definition: ffplay.c:161
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:860
AVStream * video_st
Definition: ffplay.c:284
Clock extclk
Definition: ffplay.c:221
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3054
void * opaque
Definition: avio.h:60
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:287
AVSubtitleRect ** rects
Definition: avcodec.h:4133
Format I/O context.
Definition: avformat.h:1349
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3194
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:4936
Definition: ffplay.c:154
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:295
static int16_t block[64]
Definition: dct.c:115
int av_sync_type
Definition: ffplay.c:233
unsigned int nb_stream_indexes
Definition: avformat.h:1281
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:176
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:4096
double pts
Definition: ffplay.c:158
static AVFilter ** last_filter
Definition: avfilter.c:577
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:238
AVRational start_pts_tb
Definition: ffplay.c:197
static int read_thread(void *arg)
Definition: ffplay.c:2732
int keep_last
Definition: ffplay.c:175
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
int rdft_bits
Definition: ffplay.c:268
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:889
int size
Definition: ffplay.c:121
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:725
static int64_t start_time
Definition: ffplay.c:327
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2531
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:98
Lock the mutex.
Definition: avcodec.h:6333
uint8_t
static int nb_streams
Definition: ffprobe.c:276
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:150
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:544
static int default_width
Definition: ffplay.c:313
int last_video_stream
Definition: ffplay.c:304
int width
Video only.
Definition: avcodec.h:4218
int last_subtitle_stream
Definition: ffplay.c:304
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:73
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:678
#define HAS_ARG
Definition: cmdutils.h:166
int audio_hw_buf_size
Definition: ffplay.c:243
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1184
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2702
int64_t duration
Definition: ffplay.c:122
struct SwrContext * swr_ctx
Definition: ffplay.c:257
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1697
int finished
Definition: ffplay.c:193
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_X86ASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3251
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:403
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:4957
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:294
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:511
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1461
static int framedrop
Definition: ffplay.c:337
SDL_Texture * vis_texture
Definition: ffplay.c:272
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:84
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1417
AVPacket pkt
Definition: ffplay.c:113
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2354
int bytes_per_sec
Definition: ffplay.c:140
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:407
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:144
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
void av_codec_set_lowres(AVCodecContext *avctx, int val)
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static int64_t audio_callback_time
Definition: ffplay.c:356
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:414
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1460
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:538
static void sigterm_handler(int sig)
Definition: ffplay.c:1300
uint8_t * data
Definition: avcodec.h:1679
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:382
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int freq
Definition: ffplay.c:135
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:673
static int flags
Definition: log.c:57
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4848
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
Definition: mxfdec.c:271
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:143
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:168
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:505
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
ptrdiff_t size
Definition: opengl_enc.c:101
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:4097
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:556
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:176
static int64_t duration
Definition: ffplay.c:328
AVRational sar
Definition: ffplay.c:164
unsigned int * stream_index
Definition: avformat.h:1280
#define av_log(a,...)
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:276
PacketQueue videoq
Definition: ffplay.c:285
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:627
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2059
AVDictionary * format_opts
Definition: cmdutils.c:72
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:833
static int borderless
Definition: ffplay.c:323
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1487
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:555
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4104
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3753
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4087
int audio_diff_avg_count
Definition: ffplay.c:240
int ytop
Definition: ffplay.c:292
int width
Definition: frame.h:259
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1567
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:356
int seek_req
Definition: ffplay.c:211
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:350
int(* callback)(void *)
Definition: avio.h:59
Create a mutex.
Definition: avcodec.h:6332
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:132
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1556
int read_pause_return
Definition: ffplay.c:215
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:488
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:293
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:4095
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:782
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:163
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:67
static int decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2090
RDFTContext * rdft
Definition: ffplay.c:267
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:798
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:96
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:713
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:465
static int autorotate
Definition: ffplay.c:351
int capabilities
Codec capabilities.
Definition: avcodec.h:3758
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4171
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1507
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:4148
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1856
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:558
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:422
AVChapter ** chapters
Definition: avformat.h:1557
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
uint16_t width
Definition: gdv.c:47
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1241
static int find_stream_info
Definition: ffplay.c:352
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:358
int video_stream
Definition: ffplay.c:283
static int video_open(VideoState *is)
Definition: ffplay.c:1313
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int * queue_serial
Definition: ffplay.c:150
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1388
int xpos
Definition: ffplay.c:270
int channels
Definition: ffplay.c:136
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:887
static enum ShowMode show_mode
Definition: ffplay.c:339
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1276
#define FFMAX(a, b)
Definition: common.h:94
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:529
static const OptionDef options[]
Definition: ffplay.c:3558
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3556
#define fail()
Definition: checkasm.h:109
int8_t exp
Definition: eval.c:65
enum AVPixelFormat format
Definition: ffplay.c:368
double audio_clock
Definition: ffplay.c:235
int force_refresh
Definition: ffplay.c:207
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2574
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVDictionary * sws_dict
Definition: cmdutils.c:70
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3485
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2344
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2245
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:4131
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:674
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:4134
static int genpts
Definition: ffplay.c:330
static AVPacket flush_pkt
Definition: ffplay.c:358
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:379
int flip_v
Definition: ffplay.c:166
double frame_last_returned_time
Definition: ffplay.c:281
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:582
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:469
static const char * subtitle_codec_name
Definition: ffplay.c:341
static int subtitle_disable
Definition: ffplay.c:319
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:135
int max_size
Definition: ffplay.c:174
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1405
int step
Definition: ffplay.c:293
SDL_Thread * decoder_tid
Definition: ffplay.c:200
AVDictionary * opts
Definition: movenc.c:50
static SDL_Window * window
Definition: ffplay.c:362
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4180
SDL_mutex * mutex
Definition: ffplay.c:125
static av_const double hypot(double x, double y)
Definition: libm.h:366
int audio_write_buf_size
Definition: ffplay.c:249
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:164
int linesize[4]
Definition: avcodec.h:4112
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:131
int channels
number of audio channels, only used for audio.
Definition: frame.h:506
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:130
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:284
struct MyAVPacketList * next
Definition: ffplay.c:114
#define AV_CH_LAYOUT_STEREO_DOWNMIX
char filename[1024]
input or output filename
Definition: avformat.h:1425
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define FFMIN(a, b)
Definition: common.h:96
SDL_mutex * mutex
Definition: ffplay.c:177
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
int windex
Definition: ffplay.c:172
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:557
static int cursor_hidden
Definition: ffplay.c:345
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:576
AVSubtitle sub
Definition: ffplay.c:156
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3651
int width
picture width / height.
Definition: avcodec.h:1948
int main(int argc, char **argv)
Definition: ffplay.c:3673
int height
Definition: ffplay.c:162
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
static void show_usage(void)
Definition: ffplay.c:3609
int nb_packets
Definition: ffplay.c:120
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3457
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1745
int frame_drops_late
Definition: ffplay.c:259
struct AudioParams audio_src
Definition: ffplay.c:252
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3206
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1382
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFormatContext * ctx
Definition: movenc.c:48
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:329
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2284
int last_i_start
Definition: ffplay.c:266
uint16_t format
Definition: avcodec.h:4129
#define OPT_INT64
Definition: cmdutils.h:175
MyAVPacketList * last_pkt
Definition: ffplay.c:119
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1499
int n
Definition: avisynth_c.h:684
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:65
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2313
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:355
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:822
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2708
static const AVFilterPad outputs[]
Definition: af_afftfilt.c:389
static int startup_volume
Definition: ffplay.c:324
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:476
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:4111
static int decoder_reorder_pts
Definition: ffplay.c:332
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1376
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:264
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1037
int paused
Definition: ffplay.c:149
static const char * input_filename
Definition: ffplay.c:311
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:859
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:735
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3616
int av_codec_get_max_lowres(const AVCodec *codec)
Definition: utils.c:583
int64_t pos
Definition: ffplay.c:160
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:304
Stream structure.
Definition: avformat.h:889
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1726
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1691
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1032
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:274
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:372
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:646
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:355
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4861
char * filename
Definition: ffplay.c:291
static int screen_height
Definition: ffplay.c:316
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3506
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:253
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
static const AVFilterPad inputs[]
Definition: af_afftfilt.c:379
int64_t next_pts
Definition: ffplay.c:198
static int autoexit
Definition: ffplay.c:333
AVFrame * frame
Definition: ffplay.c:155
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:237
int serial
Definition: ffplay.c:148
int uploaded
Definition: ffplay.c:165
enum AVMediaType codec_type
Definition: avcodec.h:1769
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:756
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:856
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
enum AVCodecID codec_id
Definition: avcodec.h:1778
static void do_exit(VideoState *is)
Definition: ffplay.c:1278
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int sample_rate
samples per second
Definition: avcodec.h:2523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:232
AVIOContext * pb
I/O context.
Definition: avformat.h:1391
#define ss
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:944
static int loop
Definition: ffplay.c:336
int last_paused
Definition: ffplay.c:209
static int exit_on_keydown
Definition: ffplay.c:334
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
FFT functions.
main external API structure.
Definition: avcodec.h:1761
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:1275
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:618
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:353
Decoder subdec
Definition: ffplay.c:229
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
double max_frame_duration
Definition: ffplay.c:286
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1144
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:289
Clock vidclk
Definition: ffplay.c:220
int x
Definition: f_ebur128.c:91
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:63
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:753
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:493
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1537
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:312
double pts
Definition: ffplay.c:144
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:730
static int audio_thread(void *arg)
Definition: ffplay.c:2003
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
static int av_sync_type
Definition: ffplay.c:326
int pkt_serial
Definition: ffplay.c:192
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:718
static const char * format
Definition: movenc.c:47
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
static SDL_RendererInfo renderer_info
Definition: ffplay.c:364
int sample_rate
Sample rate of the audio data.
Definition: frame.h:374
int configure_filtergraph(FilterGraph *fg)
Definition: f_ebur128.c:91
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1040
PacketQueue audioq
Definition: ffplay.c:242
int packet_pending
Definition: ffplay.c:194
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
int64_t seek_pos
Definition: ffplay.c:213
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
#define isnan(x)
Definition: libm.h:340
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:288
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:164
#define OPT_STRING
Definition: cmdutils.h:169
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1042
SDL_cond * cond
Definition: ffplay.c:126
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2424
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:585
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:825
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2715
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1439
int queue_attachments_req
Definition: ffplay.c:210
unsigned nb_filters
Definition: avfilter.h:858
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:627
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:466
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:679
int error
contains the error code or 0 if no error happened
Definition: avio.h:246
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:706
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1713
int audio_stream
Definition: ffplay.c:231
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2473
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:143
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2543
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:371
char * name
unique name for this input/output in the list
Definition: avfilter.h:1034
static int64_t cursor_last_shown
Definition: ffplay.c:344
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:685
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3451
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:505
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:905
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: utils.c:1280
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1714
SDL_Texture * sub_texture
Definition: ffplay.c:273
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1434
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:215
int frame_drops_early
Definition: ffplay.c:258
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2265
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
SDL_Texture * vid_texture
Definition: ffplay.c:274
int sample_array_index
Definition: ffplay.c:265
SDL_cond * continue_read_thread
Definition: ffplay.c:306
int64_t start
Definition: avformat.h:1309
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:706
int sample_rate
Audio only.
Definition: avcodec.h:4262
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:804
#define OPT_BOOL
Definition: cmdutils.h:167
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:310
double speed
Definition: ffplay.c:147
static int exit_on_mousedown
Definition: ffplay.c:335
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:73
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78
#define CODEC_FLAG_EMU_EDGE
Definition: avcodec.h:1140
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1051
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static int video_thread(void *arg)
Definition: ffplay.c:2101
#define OPT_INT
Definition: cmdutils.h:172
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:198
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1305
AVDictionary * codec_opts
Definition: cmdutils.c:72
struct AudioParams audio_tgt
Definition: ffplay.c:256
sample_rates
AVRational av_codec_get_pkt_timebase(const AVCodecContext *avctx)
Free mutex resources.
Definition: avcodec.h:6335
if(ret< 0)
Definition: vf_mcdeint.c:279
uint8_t * audio_buf
Definition: ffplay.c:244
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
int muted
Definition: ffplay.c:251
static int display_disable
Definition: ffplay.c:322
static int video_disable
Definition: ffplay.c:318
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3498
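A sketch of probing an already-opened context and labelling its streams, with av_get_media_type_string() (listed above) supplying the readable names; list_streams_sketch() is illustrative and error handling is reduced to the minimum.

#include <libavformat/avformat.h>
#include <libavutil/log.h>

static int list_streams_sketch(AVFormatContext *ic)
{
    int ret = avformat_find_stream_info(ic, NULL);
    if (ret < 0)
        return ret;
    for (unsigned i = 0; i < ic->nb_streams; i++) {
        const char *type =
            av_get_media_type_string(ic->streams[i]->codecpar->codec_type);
        av_log(NULL, AV_LOG_INFO, "stream #%u: %s\n", i, type ? type : "unknown");
    }
    return 0;
}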
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:936
AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
int audio_buf_index
Definition: ffplay.c:248
uint8_t * audio_buf1
Definition: ffplay.c:245
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3500
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:147
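A sketch of building an options dictionary with both string and integer values before opening an input; the keys shown ("scan_all_pmts", "analyzeduration") are real format options but are chosen purely for illustration.

#include <libavutil/dict.h>
#include <libavutil/avutil.h>

static AVDictionary *format_opts_sketch(void)
{
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
    av_dict_set_int(&opts, "analyzeduration", 5 * AV_TIME_BASE, 0);
    return opts;   /* caller releases it with av_dict_free(&opts) */
}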
static int screen_width
Definition: ffplay.c:315
PacketQueue * pktq
Definition: ffplay.c:179
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:950
uint32_t start_display_time
Definition: avcodec.h:4130
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1492
FFTSample * rdft_data
Definition: ffplay.c:269
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1549
int audio_clock_serial
Definition: ffplay.c:236
AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:87
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:367
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1308
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
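A sketch of the stack-packet initialization idiom this generation of the code relies on; ffplay initializes its global flush packet the same way before tagging it with a sentinel data pointer. Note that av_init_packet() deliberately does not touch the data and size members.

#include <libavcodec/avcodec.h>

static void init_empty_packet_sketch(AVPacket *pkt)
{
    av_init_packet(pkt);   /* pts/dts = AV_NOPTS_VALUE, side data cleared */
    pkt->data = NULL;      /* data and size are not touched by av_init_packet(), */
    pkt->size = 0;         /* so clear them explicitly                           */
}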
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:89
PacketQueue subtitleq
Definition: ffplay.c:278
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1361
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4339
static int lowres
Definition: ffplay.c:331
int eof
Definition: ffplay.c:289
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:639
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:338
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:494
double duration
Definition: ffplay.c:159
int pixels
Definition: avisynth_c.h:429
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:87
int eof_reached
true if eof reached
Definition: avio.h:240
#define NAN
Definition: math.h:28
int len
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:354
int channels
number of audio channels
Definition: avcodec.h:2524
unsigned int audio_buf1_size
Definition: ffplay.c:247
int av_buffersink_get_channels(const AVFilterContext *ctx)
SDL_Thread * read_tid
Definition: ffplay.c:204
AVPacket pkt
Definition: ffplay.c:189
int frame_size
Definition: ffplay.c:139
void av_log_set_flags(int arg)
Definition: log.c:396
int64_t start_pts
Definition: ffplay.c:196
int abort_request
Definition: ffplay.c:206
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:813
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:451
double last_updated
Definition: ffplay.c:146
Decoder viddec
Definition: ffplay.c:228
#define lrint
Definition: tablegen.h:53
AVDictionary * swr_opts
Definition: cmdutils.c:71
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:510
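A minimal open/close lifecycle sketch; on failure avformat_open_input() frees the context itself, so only the success path needs avformat_close_input() (listed earlier in this index). The filename comes from the caller.

#include <libavformat/avformat.h>

static int open_and_close_sketch(const char *filename)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, filename, NULL, NULL);
    if (ret < 0)
        return ret;              /* on failure the context is freed and ic stays NULL */
    /* ... an av_read_frame(ic, &pkt) loop would go here ... */
    avformat_close_input(&ic);   /* frees everything and resets ic to NULL */
    return 0;
}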
int height
Definition: ffplay.c:292
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:205
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1863
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:538
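A reduced sketch of the splice pattern used by ffplay's configure_filtergraph(): a textual filter description is parsed between a pre-created source and sink filter via the AVFilterInOut lists allocated with avfilter_inout_alloc(). The helper name and the non-NULL filtergraph string are assumptions of this sketch.

#include <libavfilter/avfilter.h>
#include <libavutil/mem.h>
#include <libavutil/error.h>

static int splice_filters_sketch(AVFilterGraph *graph, const char *filtergraph,
                                 AVFilterContext *src_ctx, AVFilterContext *sink_ctx)
{
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    if (!outputs || !inputs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    outputs->name       = av_strdup("in");   /* label of the source's output pad */
    outputs->filter_ctx = src_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name        = av_strdup("out");  /* label of the sink's input pad */
    inputs->filter_ctx  = sink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);
end:
    avfilter_inout_free(&outputs);
    avfilter_inout_free(&inputs);
    return ret;
}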
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:366
int channels
Audio only.
Definition: avcodec.h:4258
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1678
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
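A tiny sketch of the common guard for decoders that report a channel count but no channel layout, which resampler setup needs; the helper name is illustrative.

#include <libavutil/channel_layout.h>

static int64_t effective_layout_sketch(int64_t reported_layout, int nb_channels)
{
    return reported_layout ? reported_layout
                           : av_get_default_channel_layout(nb_channels);
}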
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1444
int height
Definition: frame.h:259
FILE * out
Definition: movenc.c:54
static const char * video_codec_name
Definition: ffplay.c:342
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3497
PacketQueue * queue
Definition: ffplay.c:190
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:664
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:740
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int subtitle_thread(void *arg)
Definition: ffplay.c:2210
FrameQueue subpq
Definition: ffplay.c:224
int format
Definition: ffplay.c:163
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Definition: avformat.h:1252
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1421
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:173
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:356
#define FF_QUIT_EVENT
Definition: ffplay.c:360
int xleft
Definition: ffplay.c:292
#define FFSWAP(type, a, b)
Definition: common.h:99
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2335
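A sketch of defensive logging around this lookup, since it returns NULL for unknown pixel formats.

#include <libavutil/pixdesc.h>
#include <libavutil/log.h>

static void log_pix_fmt_sketch(enum AVPixelFormat fmt)
{
    const char *name = av_get_pix_fmt_name(fmt);
    av_log(NULL, AV_LOG_INFO, "pixel format: %s\n", name ? name : "none");
}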
int stream_index
Definition: avcodec.h:1681
#define OPT_INPUT
Definition: cmdutils.h:186
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:926
int subtitle_stream
Definition: ffplay.c:276
unsigned int audio_buf_size
Definition: ffplay.c:246
int64_t seek_rel
Definition: ffplay.c:214
int realtime
Definition: ffplay.c:217
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:248
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:952
static void video_display(VideoState *is)
Definition: ffplay.c:1342
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:320
static int show_status
Definition: ffplay.c:325
static int compute_mod(int a, int b)
Definition: ffplay.c:1037
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1656
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:390
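A sketch of the one-time global registration performed at program start in this FFmpeg generation (later releases register everything automatically, making these calls unnecessary).

#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>

static void register_everything_sketch(void)
{
    avdevice_register_all();   /* capture/playback devices */
    av_register_all();         /* muxers, demuxers and protocols */
}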
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:449
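A sketch of setting a named AVOption on an AVOptions-enabled object; "threads" is a real AVCodecContext option but is used here only as an illustration, and the helper name is hypothetical.

#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>

static int set_codec_option_sketch(AVCodecContext *avctx, const char *value)
{
    /* search_flags = 0: look up the option on the object itself, not its children */
    return av_opt_set(avctx, "threads", value, 0);
}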
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2426
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:267
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1396
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:1002
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3188
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1672
double last_vis_time
Definition: ffplay.c:271
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached picture.
Definition: avformat.h:979
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define av_unused
Definition: attributes.h:125
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
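A sketch of the allocate/configure/init sequence; the parameters are caller-supplied and swr_alloc_set_opts() is the conventional way to fill them in before swr_init(). The helper name is illustrative.

#include <libswresample/swresample.h>

static struct SwrContext *make_resampler_sketch(int64_t out_layout, enum AVSampleFormat out_fmt, int out_rate,
                                                int64_t in_layout,  enum AVSampleFormat in_fmt,  int in_rate)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
                                                out_layout, out_fmt, out_rate,
                                                in_layout,  in_fmt,  in_rate,
                                                0, NULL);
    if (swr && swr_init(swr) < 0)
        swr_free(&swr);   /* init failed: free the context and return NULL */
    return swr;
}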
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
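A sketch of pushing one decoded frame into a configured graph and pulling one filtered frame back out; the source and sink filter contexts are assumed to have been created elsewhere (e.g. with the buffer/buffersink filters), and the helper name is hypothetical.

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>

static int filter_one_frame_sketch(AVFilterContext *src, AVFilterContext *sink,
                                   AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame(src, in);   /* takes ownership of the frame's references */
    if (ret < 0)
        return ret;
    return av_buffersink_get_frame(sink, out);   /* AVERROR(EAGAIN) if nothing is ready yet */
}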
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:357
AVFormatContext * ic
Definition: ffplay.c:216
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
int audio_volume
Definition: ffplay.c:250
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:756