FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
48 #include "libswresample/swresample.h"
49 
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53 
54 #include <SDL.h>
55 #include <SDL_thread.h>
56 
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60 
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68 
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate the actual buffer size, keeping in mind that it must not cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
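/* Illustrative sketch (not shown in this excerpt): ffplay's audio_open()
 * combines the two constants above so the SDL buffer is a power of two that
 * is at least SDL_AUDIO_MIN_BUFFER_SIZE samples, yet short enough to keep the
 * callback rate near SDL_AUDIO_MAX_CALLBACKS_PER_SEC. For a 48000 Hz stream:
 *
 *     samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
 *                     2 << av_log2(48000 / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
 *     // 48000 / 30 = 1600, av_log2(1600) = 10, 2 << 10 = 2048 samples (~43 ms)
 */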
73 
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if the error is too big */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
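/* Sketch (the assignment itself is outside this excerpt, in ffplay's
 * stream_component_open()): AUDIO_DIFF_AVG_NB is turned into an exponential
 * moving-average coefficient so that roughly the last 20 A-V differences carry
 * 99% of the weight:
 *
 *     audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
 *     // = 0.01^(1/20) ~= 0.794, and 0.794^20 ~= 0.01, so samples older than
 *     // AUDIO_DIFF_AVG_NB updates contribute only ~1% of the average
 */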
96 
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107 
108 typedef struct MyAVPacketList {
109  AVPacket *pkt;
110  int serial;
111 } MyAVPacketList;
112 
113 typedef struct PacketQueue {
114  AVFifo *pkt_list;
115  int nb_packets;
116  int size;
117  int64_t duration;
118  int abort_request;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
129 typedef struct AudioParams {
130  int freq;
131  AVChannelLayout ch_layout;
132  enum AVSampleFormat fmt;
133  int frame_size;
134  int bytes_per_sec;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146 
147 typedef struct FrameData {
148  int64_t pkt_pos;
149 } FrameData;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
153  AVFrame *frame;
154  AVSubtitle sub;
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  int width;
160  int height;
161  int format;
162  AVRational sar;
163  int uploaded;
164  int flip_v;
165 } Frame;
166 
167 typedef struct FrameQueue {
168  Frame queue[FRAME_QUEUE_SIZE];
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
173  int keep_last;
174  int rindex_shown;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
177  PacketQueue *pktq;
178 } FrameQueue;
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
182  AV_SYNC_VIDEO_MASTER, /* synchronize to video clock */
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
191  int finished;
193  SDL_cond *empty_queue_cond;
198  SDL_Thread *decoder_tid;
199 } Decoder;
200 
201 typedef struct VideoState {
202  SDL_Thread *read_tid;
206  int paused;
209  int seek_req;
215  int realtime;
216 
220 
224 
228 
230 
232 
233  double audio_clock;
235  double audio_diff_cum; /* used for AV difference average computation */
242  uint8_t *audio_buf;
243  uint8_t *audio_buf1;
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249  int muted;
256 
257  enum ShowMode {
259  } show_mode;
266  float *real_data;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354 
355 /* current context */
356 static int is_full_screen;
358 
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365 
367 
368 static const struct TextureFormatEntry {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391 };
392 
393 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
394 {
396  if (ret < 0)
397  return ret;
398 
400  if (!vfilters_list[nb_vfilters - 1])
401  return AVERROR(ENOMEM);
402 
403  return 0;
404 }
405 
406 static inline
407 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
408  enum AVSampleFormat fmt2, int64_t channel_count2)
409 {
410  /* If channel count == 1, planar and non-planar formats are the same */
411  if (channel_count1 == 1 && channel_count2 == 1)
413  else
414  return channel_count1 != channel_count2 || fmt1 != fmt2;
415 }
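/* Illustrative use (not part of ffplay.c): for mono, the planar and packed
 * variants of a sample format compare as equal, so no filter reconfiguration
 * is triggered; for two or more channels they do not:
 *
 *     cmp_audio_fmts(AV_SAMPLE_FMT_FLTP, 1, AV_SAMPLE_FMT_FLT, 1);  // 0 (same)
 *     cmp_audio_fmts(AV_SAMPLE_FMT_FLTP, 2, AV_SAMPLE_FMT_FLT, 2);  // nonzero
 */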
416 
418 {
419  MyAVPacketList pkt1;
420  int ret;
421 
422  if (q->abort_request)
423  return -1;
424 
425 
426  pkt1.pkt = pkt;
427  pkt1.serial = q->serial;
428 
429  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
430  if (ret < 0)
431  return ret;
432  q->nb_packets++;
433  q->size += pkt1.pkt->size + sizeof(pkt1);
434  q->duration += pkt1.pkt->duration;
435  /* XXX: should duplicate packet data in DV case */
436  SDL_CondSignal(q->cond);
437  return 0;
438 }
439 
441 {
442  AVPacket *pkt1;
443  int ret;
444 
445  pkt1 = av_packet_alloc();
446  if (!pkt1) {
448  return -1;
449  }
450  av_packet_move_ref(pkt1, pkt);
451 
452  SDL_LockMutex(q->mutex);
453  ret = packet_queue_put_private(q, pkt1);
454  SDL_UnlockMutex(q->mutex);
455 
456  if (ret < 0)
457  av_packet_free(&pkt1);
458 
459  return ret;
460 }
461 
462 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
463 {
464  pkt->stream_index = stream_index;
465  return packet_queue_put(q, pkt);
466 }
467 
468 /* packet queue handling */
470 {
471  memset(q, 0, sizeof(PacketQueue));
473  if (!q->pkt_list)
474  return AVERROR(ENOMEM);
475  q->mutex = SDL_CreateMutex();
476  if (!q->mutex) {
477  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
478  return AVERROR(ENOMEM);
479  }
480  q->cond = SDL_CreateCond();
481  if (!q->cond) {
482  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
483  return AVERROR(ENOMEM);
484  }
485  q->abort_request = 1;
486  return 0;
487 }
488 
490 {
491  MyAVPacketList pkt1;
492 
493  SDL_LockMutex(q->mutex);
494  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
495  av_packet_free(&pkt1.pkt);
496  q->nb_packets = 0;
497  q->size = 0;
498  q->duration = 0;
499  q->serial++;
500  SDL_UnlockMutex(q->mutex);
501 }
502 
504 {
507  SDL_DestroyMutex(q->mutex);
508  SDL_DestroyCond(q->cond);
509 }
510 
512 {
513  SDL_LockMutex(q->mutex);
514 
515  q->abort_request = 1;
516 
517  SDL_CondSignal(q->cond);
518 
519  SDL_UnlockMutex(q->mutex);
520 }
521 
523 {
524  SDL_LockMutex(q->mutex);
525  q->abort_request = 0;
526  q->serial++;
527  SDL_UnlockMutex(q->mutex);
528 }
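/* Illustrative sketch (not part of ffplay.c): how the serial counters are used
 * to discard stale data around a seek. Both packet_queue_flush() and
 * packet_queue_start() increment q->serial, so anything queued earlier becomes
 * distinguishable from post-seek data. */
static void example_seek_flush(PacketQueue *q)
{
    packet_queue_flush(q);   /* drop all queued packets, q->serial++ */
    /* Packets put from now on carry the new serial (packet_queue_put_private()
     * copies q->serial into each MyAVPacketList entry); the decoder compares
     * its pkt_serial against q->serial and throws away frames decoded from
     * pre-seek packets. */
}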
529 
530 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
531 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
532 {
533  MyAVPacketList pkt1;
534  int ret;
535 
536  SDL_LockMutex(q->mutex);
537 
538  for (;;) {
539  if (q->abort_request) {
540  ret = -1;
541  break;
542  }
543 
544  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
545  q->nb_packets--;
546  q->size -= pkt1.pkt->size + sizeof(pkt1);
547  q->duration -= pkt1.pkt->duration;
548  av_packet_move_ref(pkt, pkt1.pkt);
549  if (serial)
550  *serial = pkt1.serial;
551  av_packet_free(&pkt1.pkt);
552  ret = 1;
553  break;
554  } else if (!block) {
555  ret = 0;
556  break;
557  } else {
558  SDL_CondWait(q->cond, q->mutex);
559  }
560  }
561  SDL_UnlockMutex(q->mutex);
562  return ret;
563 }
564 
565 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
566  memset(d, 0, sizeof(Decoder));
567  d->pkt = av_packet_alloc();
568  if (!d->pkt)
569  return AVERROR(ENOMEM);
570  d->avctx = avctx;
571  d->queue = queue;
572  d->empty_queue_cond = empty_queue_cond;
574  d->pkt_serial = -1;
575  return 0;
576 }
577 
579  int ret = AVERROR(EAGAIN);
580 
581  for (;;) {
582  if (d->queue->serial == d->pkt_serial) {
583  do {
584  if (d->queue->abort_request)
585  return -1;
586 
587  switch (d->avctx->codec_type) {
588  case AVMEDIA_TYPE_VIDEO:
590  if (ret >= 0) {
591  if (decoder_reorder_pts == -1) {
592  frame->pts = frame->best_effort_timestamp;
593  } else if (!decoder_reorder_pts) {
594  frame->pts = frame->pkt_dts;
595  }
596  }
597  break;
598  case AVMEDIA_TYPE_AUDIO:
600  if (ret >= 0) {
601  AVRational tb = (AVRational){1, frame->sample_rate};
602  if (frame->pts != AV_NOPTS_VALUE)
603  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
604  else if (d->next_pts != AV_NOPTS_VALUE)
605  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
606  if (frame->pts != AV_NOPTS_VALUE) {
607  d->next_pts = frame->pts + frame->nb_samples;
608  d->next_pts_tb = tb;
609  }
610  }
611  break;
612  }
613  if (ret == AVERROR_EOF) {
614  d->finished = d->pkt_serial;
616  return 0;
617  }
618  if (ret >= 0)
619  return 1;
620  } while (ret != AVERROR(EAGAIN));
621  }
622 
623  do {
624  if (d->queue->nb_packets == 0)
625  SDL_CondSignal(d->empty_queue_cond);
626  if (d->packet_pending) {
627  d->packet_pending = 0;
628  } else {
629  int old_serial = d->pkt_serial;
630  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
631  return -1;
632  if (old_serial != d->pkt_serial) {
634  d->finished = 0;
635  d->next_pts = d->start_pts;
636  d->next_pts_tb = d->start_pts_tb;
637  }
638  }
639  if (d->queue->serial == d->pkt_serial)
640  break;
641  av_packet_unref(d->pkt);
642  } while (1);
643 
645  int got_frame = 0;
646  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
647  if (ret < 0) {
648  ret = AVERROR(EAGAIN);
649  } else {
650  if (got_frame && !d->pkt->data) {
651  d->packet_pending = 1;
652  }
653  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
654  }
655  av_packet_unref(d->pkt);
656  } else {
657  if (d->pkt->buf && !d->pkt->opaque_ref) {
658  FrameData *fd;
659 
660  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
661  if (!d->pkt->opaque_ref)
662  return AVERROR(ENOMEM);
663  fd = (FrameData*)d->pkt->opaque_ref->data;
664  fd->pkt_pos = d->pkt->pos;
665  }
666 
667  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
668  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
669  d->packet_pending = 1;
670  } else {
671  av_packet_unref(d->pkt);
672  }
673  }
674  }
675 }
676 
677 static void decoder_destroy(Decoder *d) {
678  av_packet_free(&d->pkt);
680 }
681 
683 {
684  av_frame_unref(vp->frame);
685  avsubtitle_free(&vp->sub);
686 }
687 
688 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
689 {
690  int i;
691  memset(f, 0, sizeof(FrameQueue));
692  if (!(f->mutex = SDL_CreateMutex())) {
693  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
694  return AVERROR(ENOMEM);
695  }
696  if (!(f->cond = SDL_CreateCond())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  f->pktq = pktq;
701  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
702  f->keep_last = !!keep_last;
703  for (i = 0; i < f->max_size; i++)
704  if (!(f->queue[i].frame = av_frame_alloc()))
705  return AVERROR(ENOMEM);
706  return 0;
707 }
708 
710 {
711  int i;
712  for (i = 0; i < f->max_size; i++) {
713  Frame *vp = &f->queue[i];
715  av_frame_free(&vp->frame);
716  }
717  SDL_DestroyMutex(f->mutex);
718  SDL_DestroyCond(f->cond);
719 }
720 
722 {
723  SDL_LockMutex(f->mutex);
724  SDL_CondSignal(f->cond);
725  SDL_UnlockMutex(f->mutex);
726 }
727 
729 {
730  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
731 }
732 
734 {
735  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
736 }
737 
739 {
740  return &f->queue[f->rindex];
741 }
742 
744 {
745  /* wait until we have space to put a new frame */
746  SDL_LockMutex(f->mutex);
747  while (f->size >= f->max_size &&
748  !f->pktq->abort_request) {
749  SDL_CondWait(f->cond, f->mutex);
750  }
751  SDL_UnlockMutex(f->mutex);
752 
753  if (f->pktq->abort_request)
754  return NULL;
755 
756  return &f->queue[f->windex];
757 }
758 
760 {
761  /* wait until we have a readable new frame */
762  SDL_LockMutex(f->mutex);
763  while (f->size - f->rindex_shown <= 0 &&
764  !f->pktq->abort_request) {
765  SDL_CondWait(f->cond, f->mutex);
766  }
767  SDL_UnlockMutex(f->mutex);
768 
769  if (f->pktq->abort_request)
770  return NULL;
771 
772  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
773 }
774 
776 {
777  if (++f->windex == f->max_size)
778  f->windex = 0;
779  SDL_LockMutex(f->mutex);
780  f->size++;
781  SDL_CondSignal(f->cond);
782  SDL_UnlockMutex(f->mutex);
783 }
784 
786 {
787  if (f->keep_last && !f->rindex_shown) {
788  f->rindex_shown = 1;
789  return;
790  }
791  frame_queue_unref_item(&f->queue[f->rindex]);
792  if (++f->rindex == f->max_size)
793  f->rindex = 0;
794  SDL_LockMutex(f->mutex);
795  f->size--;
796  SDL_CondSignal(f->cond);
797  SDL_UnlockMutex(f->mutex);
798 }
799 
800 /* return the number of undisplayed frames in the queue */
802 {
803  return f->size - f->rindex_shown;
804 }
805 
806 /* return last shown position */
808 {
809  Frame *fp = &f->queue[f->rindex];
810  if (f->rindex_shown && fp->serial == f->pktq->serial)
811  return fp->pos;
812  else
813  return -1;
814 }
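/* Illustrative sketch (not part of ffplay.c): the FrameQueue is a bounded ring
 * buffer shared between a decoder thread (producer) and the display code
 * (consumer). A producer looks roughly like this; the real queue_picture()
 * further below also fills pts, duration, pos and serial. */
static int example_frame_producer(FrameQueue *fq, AVFrame *decoded)
{
    Frame *vp = frame_queue_peek_writable(fq);  /* blocks while the queue is full */
    if (!vp)
        return -1;                              /* queue was aborted */
    av_frame_move_ref(vp->frame, decoded);      /* hand the frame over */
    frame_queue_push(fq);                       /* publish it to the consumer */
    return 0;
}
/* The consumer side peeks at the next frame with frame_queue_peek() or
 * frame_queue_peek_readable() and releases it with frame_queue_next(). */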
815 
816 static void decoder_abort(Decoder *d, FrameQueue *fq)
817 {
819  frame_queue_signal(fq);
820  SDL_WaitThread(d->decoder_tid, NULL);
821  d->decoder_tid = NULL;
823 }
824 
825 static inline void fill_rectangle(int x, int y, int w, int h)
826 {
827  SDL_Rect rect;
828  rect.x = x;
829  rect.y = y;
830  rect.w = w;
831  rect.h = h;
832  if (w && h)
833  SDL_RenderFillRect(renderer, &rect);
834 }
835 
836 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
837 {
838  Uint32 format;
839  int access, w, h;
840  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
841  void *pixels;
842  int pitch;
843  if (*texture)
844  SDL_DestroyTexture(*texture);
845  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
846  return -1;
847  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
848  return -1;
849  if (init_texture) {
850  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
851  return -1;
852  memset(pixels, 0, pitch * new_height);
853  SDL_UnlockTexture(*texture);
854  }
855  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
856  }
857  return 0;
858 }
859 
860 static void calculate_display_rect(SDL_Rect *rect,
861  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
862  int pic_width, int pic_height, AVRational pic_sar)
863 {
864  AVRational aspect_ratio = pic_sar;
865  int64_t width, height, x, y;
866 
867  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
868  aspect_ratio = av_make_q(1, 1);
869 
870  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
871 
872  /* XXX: we suppose the screen has a 1.0 pixel ratio */
873  height = scr_height;
874  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
875  if (width > scr_width) {
876  width = scr_width;
877  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
878  }
879  x = (scr_width - width) / 2;
880  y = (scr_height - height) / 2;
881  rect->x = scr_xleft + x;
882  rect->y = scr_ytop + y;
883  rect->w = FFMAX((int)width, 1);
884  rect->h = FFMAX((int)height, 1);
885 }
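/* Worked example (illustrative, not part of ffplay.c): fitting a 1920x1080
 * picture with a 1:1 sample aspect ratio into a 1280x1024 area. */
static void example_display_rect(void)
{
    SDL_Rect r;
    calculate_display_rect(&r, 0, 0, 1280, 1024, 1920, 1080, av_make_q(1, 1));
    /* The height starts at 1024, which would need a width of 1820 (> 1280), so
     * the width is clamped to 1280 and the height recomputed to 720; the result
     * is letterboxed and centered: r = { .x = 0, .y = 152, .w = 1280, .h = 720 }. */
}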
886 
887 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
888 {
889  int i;
890  *sdl_blendmode = SDL_BLENDMODE_NONE;
891  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
892  if (format == AV_PIX_FMT_RGB32 ||
896  *sdl_blendmode = SDL_BLENDMODE_BLEND;
897  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) {
899  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
900  return;
901  }
902  }
903 }
904 
905 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
906 {
907  int ret = 0;
908  Uint32 sdl_pix_fmt;
909  SDL_BlendMode sdl_blendmode;
910  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
911  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
912  return -1;
913  switch (sdl_pix_fmt) {
914  case SDL_PIXELFORMAT_IYUV:
915  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
916  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
917  frame->data[1], frame->linesize[1],
918  frame->data[2], frame->linesize[2]);
919  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
920  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
921  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
922  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
923  } else {
924  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
925  return -1;
926  }
927  break;
928  default:
929  if (frame->linesize[0] < 0) {
930  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
931  } else {
932  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
933  }
934  break;
935  }
936  return ret;
937 }
938 
943 };
944 
946 {
947 #if SDL_VERSION_ATLEAST(2,0,8)
948  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
949  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
950  if (frame->color_range == AVCOL_RANGE_JPEG)
951  mode = SDL_YUV_CONVERSION_JPEG;
952  else if (frame->colorspace == AVCOL_SPC_BT709)
953  mode = SDL_YUV_CONVERSION_BT709;
954  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
955  mode = SDL_YUV_CONVERSION_BT601;
956  }
957  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
958 #endif
959 }
960 
962 {
963  Frame *vp;
964  Frame *sp = NULL;
965  SDL_Rect rect;
966 
967  vp = frame_queue_peek_last(&is->pictq);
968  if (vk_renderer) {
970  return;
971  }
972 
973  if (is->subtitle_st) {
974  if (frame_queue_nb_remaining(&is->subpq) > 0) {
975  sp = frame_queue_peek(&is->subpq);
976 
977  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
978  if (!sp->uploaded) {
979  uint8_t* pixels[4];
980  int pitch[4];
981  int i;
982  if (!sp->width || !sp->height) {
983  sp->width = vp->width;
984  sp->height = vp->height;
985  }
986  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
987  return;
988 
989  for (i = 0; i < sp->sub.num_rects; i++) {
990  AVSubtitleRect *sub_rect = sp->sub.rects[i];
991 
992  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
993  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
994  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
995  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
996 
997  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
998  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
999  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1000  0, NULL, NULL, NULL);
1001  if (!is->sub_convert_ctx) {
1002  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1003  return;
1004  }
1005  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1006  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1007  0, sub_rect->h, pixels, pitch);
1008  SDL_UnlockTexture(is->sub_texture);
1009  }
1010  }
1011  sp->uploaded = 1;
1012  }
1013  } else
1014  sp = NULL;
1015  }
1016  }
1017 
1018  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1020 
1021  if (!vp->uploaded) {
1022  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1024  return;
1025  }
1026  vp->uploaded = 1;
1027  vp->flip_v = vp->frame->linesize[0] < 0;
1028  }
1029 
1030  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1032  if (sp) {
1033 #if USE_ONEPASS_SUBTITLE_RENDER
1034  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1035 #else
1036  int i;
1037  double xratio = (double)rect.w / (double)sp->width;
1038  double yratio = (double)rect.h / (double)sp->height;
1039  for (i = 0; i < sp->sub.num_rects; i++) {
1040  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1041  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1042  .y = rect.y + sub_rect->y * yratio,
1043  .w = sub_rect->w * xratio,
1044  .h = sub_rect->h * yratio};
1045  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1046  }
1047 #endif
1048  }
1049 }
1050 
1051 static inline int compute_mod(int a, int b)
1052 {
1053  return a < 0 ? a%b + b : a%b;
1054 }
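/* e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) wraps a negative sample index back
 * into [0, SAMPLE_ARRAY_SIZE), whereas C's % operator alone would return -3. */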
1055 
1057 {
1058  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1059  int ch, channels, h, h2;
1060  int64_t time_diff;
1061  int rdft_bits, nb_freq;
1062 
1063  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1064  ;
1065  nb_freq = 1 << (rdft_bits - 1);
1066 
1067  /* compute display index : center on currently output samples */
1068  channels = s->audio_tgt.ch_layout.nb_channels;
1069  nb_display_channels = channels;
1070  if (!s->paused) {
1071  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1072  n = 2 * channels;
1073  delay = s->audio_write_buf_size;
1074  delay /= n;
1075 
1076  /* to be more precise, we take into account the time spent since
1077  the last buffer computation */
1078  if (audio_callback_time) {
1079  time_diff = av_gettime_relative() - audio_callback_time;
1080  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1081  }
1082 
1083  delay += 2 * data_used;
1084  if (delay < data_used)
1085  delay = data_used;
1086 
1087  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1088  if (s->show_mode == SHOW_MODE_WAVES) {
1089  h = INT_MIN;
1090  for (i = 0; i < 1000; i += channels) {
1091  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1092  int a = s->sample_array[idx];
1093  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1094  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1095  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1096  int score = a - d;
1097  if (h < score && (b ^ c) < 0) {
1098  h = score;
1099  i_start = idx;
1100  }
1101  }
1102  }
1103 
1104  s->last_i_start = i_start;
1105  } else {
1106  i_start = s->last_i_start;
1107  }
1108 
1109  if (s->show_mode == SHOW_MODE_WAVES) {
1110  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1111 
1112  /* total height for one channel */
1113  h = s->height / nb_display_channels;
1114  /* graph height / 2 */
1115  h2 = (h * 9) / 20;
1116  for (ch = 0; ch < nb_display_channels; ch++) {
1117  i = i_start + ch;
1118  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1119  for (x = 0; x < s->width; x++) {
1120  y = (s->sample_array[i] * h2) >> 15;
1121  if (y < 0) {
1122  y = -y;
1123  ys = y1 - y;
1124  } else {
1125  ys = y1;
1126  }
1127  fill_rectangle(s->xleft + x, ys, 1, y);
1128  i += channels;
1129  if (i >= SAMPLE_ARRAY_SIZE)
1130  i -= SAMPLE_ARRAY_SIZE;
1131  }
1132  }
1133 
1134  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1135 
1136  for (ch = 1; ch < nb_display_channels; ch++) {
1137  y = s->ytop + ch * h;
1138  fill_rectangle(s->xleft, y, s->width, 1);
1139  }
1140  } else {
1141  int err = 0;
1142  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1143  return;
1144 
1145  if (s->xpos >= s->width)
1146  s->xpos = 0;
1147  nb_display_channels= FFMIN(nb_display_channels, 2);
1148  if (rdft_bits != s->rdft_bits) {
1149  const float rdft_scale = 1.0;
1150  av_tx_uninit(&s->rdft);
1151  av_freep(&s->real_data);
1152  av_freep(&s->rdft_data);
1153  s->rdft_bits = rdft_bits;
1154  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1155  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1156  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1157  0, 1 << rdft_bits, &rdft_scale, 0);
1158  }
1159  if (err < 0 || !s->rdft_data) {
1160  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1161  s->show_mode = SHOW_MODE_WAVES;
1162  } else {
1163  float *data_in[2];
1164  AVComplexFloat *data[2];
1165  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1166  uint32_t *pixels;
1167  int pitch;
1168  for (ch = 0; ch < nb_display_channels; ch++) {
1169  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1170  data[ch] = s->rdft_data + nb_freq * ch;
1171  i = i_start + ch;
1172  for (x = 0; x < 2 * nb_freq; x++) {
1173  double w = (x-nb_freq) * (1.0 / nb_freq);
1174  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1175  i += channels;
1176  if (i >= SAMPLE_ARRAY_SIZE)
1177  i -= SAMPLE_ARRAY_SIZE;
1178  }
1179  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1180  data[ch][0].im = data[ch][nb_freq].re;
1181  data[ch][nb_freq].re = 0;
1182  }
1183  /* Least efficient way to do this, we should of course
1184  * directly access it but it is more than fast enough. */
1185  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1186  pitch >>= 2;
1187  pixels += pitch * s->height;
1188  for (y = 0; y < s->height; y++) {
1189  double w = 1 / sqrt(nb_freq);
1190  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1191  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1192  : a;
1193  a = FFMIN(a, 255);
1194  b = FFMIN(b, 255);
1195  pixels -= pitch;
1196  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1197  }
1198  SDL_UnlockTexture(s->vis_texture);
1199  }
1200  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1201  }
1202  if (!s->paused)
1203  s->xpos++;
1204  }
1205 }
1206 
1207 static void stream_component_close(VideoState *is, int stream_index)
1208 {
1209  AVFormatContext *ic = is->ic;
1210  AVCodecParameters *codecpar;
1211 
1212  if (stream_index < 0 || stream_index >= ic->nb_streams)
1213  return;
1214  codecpar = ic->streams[stream_index]->codecpar;
1215 
1216  switch (codecpar->codec_type) {
1217  case AVMEDIA_TYPE_AUDIO:
1218  decoder_abort(&is->auddec, &is->sampq);
1219  SDL_CloseAudioDevice(audio_dev);
1220  decoder_destroy(&is->auddec);
1221  swr_free(&is->swr_ctx);
1222  av_freep(&is->audio_buf1);
1223  is->audio_buf1_size = 0;
1224  is->audio_buf = NULL;
1225 
1226  if (is->rdft) {
1227  av_tx_uninit(&is->rdft);
1228  av_freep(&is->real_data);
1229  av_freep(&is->rdft_data);
1230  is->rdft = NULL;
1231  is->rdft_bits = 0;
1232  }
1233  break;
1234  case AVMEDIA_TYPE_VIDEO:
1235  decoder_abort(&is->viddec, &is->pictq);
1236  decoder_destroy(&is->viddec);
1237  break;
1238  case AVMEDIA_TYPE_SUBTITLE:
1239  decoder_abort(&is->subdec, &is->subpq);
1240  decoder_destroy(&is->subdec);
1241  break;
1242  default:
1243  break;
1244  }
1245 
1246  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1247  switch (codecpar->codec_type) {
1248  case AVMEDIA_TYPE_AUDIO:
1249  is->audio_st = NULL;
1250  is->audio_stream = -1;
1251  break;
1252  case AVMEDIA_TYPE_VIDEO:
1253  is->video_st = NULL;
1254  is->video_stream = -1;
1255  break;
1256  case AVMEDIA_TYPE_SUBTITLE:
1257  is->subtitle_st = NULL;
1258  is->subtitle_stream = -1;
1259  break;
1260  default:
1261  break;
1262  }
1263 }
1264 
1266 {
1267  /* XXX: use a special url_shutdown call to abort parse cleanly */
1268  is->abort_request = 1;
1269  SDL_WaitThread(is->read_tid, NULL);
1270 
1271  /* close each stream */
1272  if (is->audio_stream >= 0)
1273  stream_component_close(is, is->audio_stream);
1274  if (is->video_stream >= 0)
1275  stream_component_close(is, is->video_stream);
1276  if (is->subtitle_stream >= 0)
1277  stream_component_close(is, is->subtitle_stream);
1278 
1279  avformat_close_input(&is->ic);
1280 
1281  packet_queue_destroy(&is->videoq);
1282  packet_queue_destroy(&is->audioq);
1283  packet_queue_destroy(&is->subtitleq);
1284 
1285  /* free all pictures */
1286  frame_queue_destroy(&is->pictq);
1287  frame_queue_destroy(&is->sampq);
1288  frame_queue_destroy(&is->subpq);
1289  SDL_DestroyCond(is->continue_read_thread);
1290  sws_freeContext(is->sub_convert_ctx);
1291  av_free(is->filename);
1292  if (is->vis_texture)
1293  SDL_DestroyTexture(is->vis_texture);
1294  if (is->vid_texture)
1295  SDL_DestroyTexture(is->vid_texture);
1296  if (is->sub_texture)
1297  SDL_DestroyTexture(is->sub_texture);
1298  av_free(is);
1299 }
1300 
1301 static void do_exit(VideoState *is)
1302 {
1303  if (is) {
1304  stream_close(is);
1305  }
1306  if (renderer)
1307  SDL_DestroyRenderer(renderer);
1308  if (vk_renderer)
1310  if (window)
1311  SDL_DestroyWindow(window);
1312  uninit_opts();
1313  for (int i = 0; i < nb_vfilters; i++)
1321  if (show_status)
1322  printf("\n");
1323  SDL_Quit();
1324  av_log(NULL, AV_LOG_QUIET, "%s", "");
1325  exit(0);
1326 }
1327 
1328 static void sigterm_handler(int sig)
1329 {
1330  exit(123);
1331 }
1332 
1334 {
1335  SDL_Rect rect;
1336  int max_width = screen_width ? screen_width : INT_MAX;
1337  int max_height = screen_height ? screen_height : INT_MAX;
1338  if (max_width == INT_MAX && max_height == INT_MAX)
1339  max_height = height;
1340  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1341  default_width = rect.w;
1342  default_height = rect.h;
1343 }
1344 
1346 {
1347  int w,h;
1348 
1351 
1352  if (!window_title)
1354  SDL_SetWindowTitle(window, window_title);
1355 
1356  SDL_SetWindowSize(window, w, h);
1357  SDL_SetWindowPosition(window, screen_left, screen_top);
1358  if (is_full_screen)
1359  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1360  SDL_ShowWindow(window);
1361 
1362  is->width = w;
1363  is->height = h;
1364 
1365  return 0;
1366 }
1367 
1368 /* display the current picture, if any */
1370 {
1371  if (!is->width)
1372  video_open(is);
1373 
1374  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1375  SDL_RenderClear(renderer);
1376  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1378  else if (is->video_st)
1380  SDL_RenderPresent(renderer);
1381 }
1382 
1383 static double get_clock(Clock *c)
1384 {
1385  if (*c->queue_serial != c->serial)
1386  return NAN;
1387  if (c->paused) {
1388  return c->pts;
1389  } else {
1390  double time = av_gettime_relative() / 1000000.0;
1391  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1392  }
1393 }
1394 
1395 static void set_clock_at(Clock *c, double pts, int serial, double time)
1396 {
1397  c->pts = pts;
1398  c->last_updated = time;
1399  c->pts_drift = c->pts - time;
1400  c->serial = serial;
1401 }
1402 
1403 static void set_clock(Clock *c, double pts, int serial)
1404 {
1405  double time = av_gettime_relative() / 1000000.0;
1406  set_clock_at(c, pts, serial, time);
1407 }
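/* Worked example (illustrative): with speed == 1.0 the clock simply advances
 * with real time. If set_clock_at(c, 10.0, serial, 100.0) was called, then
 * pts_drift = 10.0 - 100.0 = -90.0, and a later get_clock() at time 100.5
 * returns -90.0 + 100.5 - (100.5 - 100.0) * (1.0 - 1.0) = 10.5 seconds. */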
1408 
1409 static void set_clock_speed(Clock *c, double speed)
1410 {
1411  set_clock(c, get_clock(c), c->serial);
1412  c->speed = speed;
1413 }
1414 
1415 static void init_clock(Clock *c, int *queue_serial)
1416 {
1417  c->speed = 1.0;
1418  c->paused = 0;
1419  c->queue_serial = queue_serial;
1420  set_clock(c, NAN, -1);
1421 }
1422 
1423 static void sync_clock_to_slave(Clock *c, Clock *slave)
1424 {
1425  double clock = get_clock(c);
1426  double slave_clock = get_clock(slave);
1427  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1428  set_clock(c, slave_clock, slave->serial);
1429 }
1430 
1432  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1433  if (is->video_st)
1434  return AV_SYNC_VIDEO_MASTER;
1435  else
1436  return AV_SYNC_AUDIO_MASTER;
1437  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1438  if (is->audio_st)
1439  return AV_SYNC_AUDIO_MASTER;
1440  else
1441  return AV_SYNC_EXTERNAL_CLOCK;
1442  } else {
1443  return AV_SYNC_EXTERNAL_CLOCK;
1444  }
1445 }
1446 
1447 /* get the current master clock value */
1449 {
1450  double val;
1451 
1452  switch (get_master_sync_type(is)) {
1453  case AV_SYNC_VIDEO_MASTER:
1454  val = get_clock(&is->vidclk);
1455  break;
1456  case AV_SYNC_AUDIO_MASTER:
1457  val = get_clock(&is->audclk);
1458  break;
1459  default:
1460  val = get_clock(&is->extclk);
1461  break;
1462  }
1463  return val;
1464 }
1465 
1467  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1468  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1470  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1471  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1473  } else {
1474  double speed = is->extclk.speed;
1475  if (speed != 1.0)
1476  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1477  }
1478 }
1479 
1480 /* seek in the stream */
1481 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1482 {
1483  if (!is->seek_req) {
1484  is->seek_pos = pos;
1485  is->seek_rel = rel;
1486  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1487  if (by_bytes)
1488  is->seek_flags |= AVSEEK_FLAG_BYTE;
1489  is->seek_req = 1;
1490  SDL_CondSignal(is->continue_read_thread);
1491  }
1492 }
1493 
1494 /* pause or resume the video */
1496 {
1497  if (is->paused) {
1498  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1499  if (is->read_pause_return != AVERROR(ENOSYS)) {
1500  is->vidclk.paused = 0;
1501  }
1502  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1503  }
1504  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1505  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1506 }
1507 
1509 {
1511  is->step = 0;
1512 }
1513 
1515 {
1516  is->muted = !is->muted;
1517 }
1518 
1519 static void update_volume(VideoState *is, int sign, double step)
1520 {
1521  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1522  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1523  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1524 }
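/* Worked example (illustrative): with SDL_MIX_MAXVOLUME == 128 and
 * step == SDL_VOLUME_STEP == 0.75 dB, one volume-up press from audio_volume == 64:
 *     volume_level = 20 * log10(64 / 128)                      ~= -6.02 dB
 *     new_volume   = lrint(128 * pow(10, (-6.02 + 0.75) / 20))  = 70
 * so the volume rises from 64 to 70; the "audio_volume + sign" fallback
 * guarantees at least a one-step change when rounding would leave it unchanged. */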
1525 
1527 {
1528  /* if the stream is paused, unpause it, then step */
1529  if (is->paused)
1531  is->step = 1;
1532 }
1533 
1534 static double compute_target_delay(double delay, VideoState *is)
1535 {
1536  double sync_threshold, diff = 0;
1537 
1538  /* update delay to follow master synchronisation source */
1540  /* if video is slave, we try to correct big delays by
1541  duplicating or deleting a frame */
1542  diff = get_clock(&is->vidclk) - get_master_clock(is);
1543 
1544  /* skip or repeat frame. We take into account the
1545  delay to compute the threshold. I still don't know
1546  if it is the best guess */
1547  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1548  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1549  if (diff <= -sync_threshold)
1550  delay = FFMAX(0, delay + diff);
1551  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1552  delay = delay + diff;
1553  else if (diff >= sync_threshold)
1554  delay = 2 * delay;
1555  }
1556  }
1557 
1558  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1559  delay, -diff);
1560 
1561  return delay;
1562 }
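/* Worked example (illustrative): with a last frame duration of delay == 0.040 s,
 * sync_threshold == 0.040. If the video clock is 0.08 s behind the master
 * (diff == -0.08), delay becomes FFMAX(0, 0.040 - 0.08) == 0 and the next frame
 * is shown immediately. If the video is 0.08 s ahead (diff == +0.08) and
 * delay <= AV_SYNC_FRAMEDUP_THRESHOLD, delay is doubled to 0.080 s, effectively
 * repeating the current frame for one extra display interval. */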
1563 
1564 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1565  if (vp->serial == nextvp->serial) {
1566  double duration = nextvp->pts - vp->pts;
1567  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1568  return vp->duration;
1569  else
1570  return duration;
1571  } else {
1572  return 0.0;
1573  }
1574 }
1575 
1576 static void update_video_pts(VideoState *is, double pts, int serial)
1577 {
1578  /* update current video pts */
1579  set_clock(&is->vidclk, pts, serial);
1580  sync_clock_to_slave(&is->extclk, &is->vidclk);
1581 }
1582 
1583 /* called to display each frame */
1584 static void video_refresh(void *opaque, double *remaining_time)
1585 {
1586  VideoState *is = opaque;
1587  double time;
1588 
1589  Frame *sp, *sp2;
1590 
1591  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1593 
1594  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1595  time = av_gettime_relative() / 1000000.0;
1596  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1597  video_display(is);
1598  is->last_vis_time = time;
1599  }
1600  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1601  }
1602 
1603  if (is->video_st) {
1604 retry:
1605  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1606  // nothing to do, no picture to display in the queue
1607  } else {
1608  double last_duration, duration, delay;
1609  Frame *vp, *lastvp;
1610 
1611  /* dequeue the picture */
1612  lastvp = frame_queue_peek_last(&is->pictq);
1613  vp = frame_queue_peek(&is->pictq);
1614 
1615  if (vp->serial != is->videoq.serial) {
1616  frame_queue_next(&is->pictq);
1617  goto retry;
1618  }
1619 
1620  if (lastvp->serial != vp->serial)
1621  is->frame_timer = av_gettime_relative() / 1000000.0;
1622 
1623  if (is->paused)
1624  goto display;
1625 
1626  /* compute nominal last_duration */
1627  last_duration = vp_duration(is, lastvp, vp);
1628  delay = compute_target_delay(last_duration, is);
1629 
1630  time= av_gettime_relative()/1000000.0;
1631  if (time < is->frame_timer + delay) {
1632  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1633  goto display;
1634  }
1635 
1636  is->frame_timer += delay;
1637  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1638  is->frame_timer = time;
1639 
1640  SDL_LockMutex(is->pictq.mutex);
1641  if (!isnan(vp->pts))
1642  update_video_pts(is, vp->pts, vp->serial);
1643  SDL_UnlockMutex(is->pictq.mutex);
1644 
1645  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1646  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1647  duration = vp_duration(is, vp, nextvp);
1648  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1649  is->frame_drops_late++;
1650  frame_queue_next(&is->pictq);
1651  goto retry;
1652  }
1653  }
1654 
1655  if (is->subtitle_st) {
1656  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1657  sp = frame_queue_peek(&is->subpq);
1658 
1659  if (frame_queue_nb_remaining(&is->subpq) > 1)
1660  sp2 = frame_queue_peek_next(&is->subpq);
1661  else
1662  sp2 = NULL;
1663 
1664  if (sp->serial != is->subtitleq.serial
1665  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1666  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1667  {
1668  if (sp->uploaded) {
1669  int i;
1670  for (i = 0; i < sp->sub.num_rects; i++) {
1671  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1672  uint8_t *pixels;
1673  int pitch, j;
1674 
1675  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1676  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1677  memset(pixels, 0, sub_rect->w << 2);
1678  SDL_UnlockTexture(is->sub_texture);
1679  }
1680  }
1681  }
1682  frame_queue_next(&is->subpq);
1683  } else {
1684  break;
1685  }
1686  }
1687  }
1688 
1689  frame_queue_next(&is->pictq);
1690  is->force_refresh = 1;
1691 
1692  if (is->step && !is->paused)
1694  }
1695 display:
1696  /* display picture */
1697  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1698  video_display(is);
1699  }
1700  is->force_refresh = 0;
1701  if (show_status) {
1702  AVBPrint buf;
1703  static int64_t last_time;
1704  int64_t cur_time;
1705  int aqsize, vqsize, sqsize;
1706  double av_diff;
1707 
1708  cur_time = av_gettime_relative();
1709  if (!last_time || (cur_time - last_time) >= 30000) {
1710  aqsize = 0;
1711  vqsize = 0;
1712  sqsize = 0;
1713  if (is->audio_st)
1714  aqsize = is->audioq.size;
1715  if (is->video_st)
1716  vqsize = is->videoq.size;
1717  if (is->subtitle_st)
1718  sqsize = is->subtitleq.size;
1719  av_diff = 0;
1720  if (is->audio_st && is->video_st)
1721  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1722  else if (is->video_st)
1723  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1724  else if (is->audio_st)
1725  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1726 
1728  av_bprintf(&buf,
1729  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1731  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1732  av_diff,
1733  is->frame_drops_early + is->frame_drops_late,
1734  aqsize / 1024,
1735  vqsize / 1024,
1736  sqsize);
1737 
1738  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1739  fprintf(stderr, "%s", buf.str);
1740  else
1741  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1742 
1743  fflush(stderr);
1744  av_bprint_finalize(&buf, NULL);
1745 
1746  last_time = cur_time;
1747  }
1748  }
1749 }
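/* Illustrative sketch (simplified; the real loop, refresh_loop_wait_event(),
 * lives outside this excerpt): the caller sleeps for the remaining_time that
 * video_refresh() reports, bounded by REFRESH_RATE, so the display wakes up
 * just in time for the next frame instead of busy-waiting. */
static void example_refresh_loop(VideoState *is, SDL_Event *event)
{
    double remaining_time = 0.0;
    SDL_PumpEvents();
    while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
        if (remaining_time > 0.0)
            av_usleep((int64_t)(remaining_time * 1000000.0));
        remaining_time = REFRESH_RATE;
        if (!is->paused || is->force_refresh)
            video_refresh(is, &remaining_time);
        SDL_PumpEvents();
    }
}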
1750 
1751 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1752 {
1753  Frame *vp;
1754 
1755 #if defined(DEBUG_SYNC)
1756  printf("frame_type=%c pts=%0.3f\n",
1757  av_get_picture_type_char(src_frame->pict_type), pts);
1758 #endif
1759 
1760  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1761  return -1;
1762 
1763  vp->sar = src_frame->sample_aspect_ratio;
1764  vp->uploaded = 0;
1765 
1766  vp->width = src_frame->width;
1767  vp->height = src_frame->height;
1768  vp->format = src_frame->format;
1769 
1770  vp->pts = pts;
1771  vp->duration = duration;
1772  vp->pos = pos;
1773  vp->serial = serial;
1774 
1775  set_default_window_size(vp->width, vp->height, vp->sar);
1776 
1777  av_frame_move_ref(vp->frame, src_frame);
1778  frame_queue_push(&is->pictq);
1779  return 0;
1780 }
1781 
1783 {
1784  int got_picture;
1785 
1786  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1787  return -1;
1788 
1789  if (got_picture) {
1790  double dpts = NAN;
1791 
1792  if (frame->pts != AV_NOPTS_VALUE)
1793  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1794 
1795  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1796 
1798  if (frame->pts != AV_NOPTS_VALUE) {
1799  double diff = dpts - get_master_clock(is);
1800  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1801  diff - is->frame_last_filter_delay < 0 &&
1802  is->viddec.pkt_serial == is->vidclk.serial &&
1803  is->videoq.nb_packets) {
1804  is->frame_drops_early++;
1806  got_picture = 0;
1807  }
1808  }
1809  }
1810  }
1811 
1812  return got_picture;
1813 }
1814 
1815 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1816  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1817 {
1818  int ret, i;
1819  int nb_filters = graph->nb_filters;
1821 
1822  if (filtergraph) {
1825  if (!outputs || !inputs) {
1826  ret = AVERROR(ENOMEM);
1827  goto fail;
1828  }
1829 
1830  outputs->name = av_strdup("in");
1831  outputs->filter_ctx = source_ctx;
1832  outputs->pad_idx = 0;
1833  outputs->next = NULL;
1834 
1835  inputs->name = av_strdup("out");
1836  inputs->filter_ctx = sink_ctx;
1837  inputs->pad_idx = 0;
1838  inputs->next = NULL;
1839 
1840  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1841  goto fail;
1842  } else {
1843  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1844  goto fail;
1845  }
1846 
1847  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1848  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1849  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1850 
1851  ret = avfilter_graph_config(graph, NULL);
1852 fail:
1855  return ret;
1856 }
1857 
1858 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1859 {
1861  char sws_flags_str[512] = "";
1862  int ret;
1863  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1864  AVCodecParameters *codecpar = is->video_st->codecpar;
1865  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1866  const AVDictionaryEntry *e = NULL;
1867  int nb_pix_fmts = 0;
1868  int i, j;
1870 
1871  if (!par)
1872  return AVERROR(ENOMEM);
1873 
1874  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1875  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) {
1876  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1877  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1878  break;
1879  }
1880  }
1881  }
1882 
1883  while ((e = av_dict_iterate(sws_dict, e))) {
1884  if (!strcmp(e->key, "sws_flags")) {
1885  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1886  } else
1887  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1888  }
1889  if (strlen(sws_flags_str))
1890  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1891 
1892  graph->scale_sws_opts = av_strdup(sws_flags_str);
1893 
1894 
1895  filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"),
1896  "ffplay_buffer");
1897  if (!filt_src) {
1898  ret = AVERROR(ENOMEM);
1899  goto fail;
1900  }
1901 
1902  par->format = frame->format;
1903  par->time_base = is->video_st->time_base;
1904  par->width = frame->width;
1905  par->height = frame->height;
1906  par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
1907  par->color_space = frame->colorspace;
1908  par->color_range = frame->color_range;
1909  par->frame_rate = fr;
1910  par->hw_frames_ctx = frame->hw_frames_ctx;
1911  ret = av_buffersrc_parameters_set(filt_src, par);
1912  if (ret < 0)
1913  goto fail;
1914 
1915  ret = avfilter_init_dict(filt_src, NULL);
1916  if (ret < 0)
1917  goto fail;
1918 
1919  filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"),
1920  "ffplay_buffersink");
1921  if (!filt_out) {
1922  ret = AVERROR(ENOMEM);
1923  goto fail;
1924  }
1925 
1926  if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
1927  0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
1928  goto fail;
1929  if (!vk_renderer &&
1930  (ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN,
1933  goto fail;
1934 
1935  ret = avfilter_init_dict(filt_out, NULL);
1936  if (ret < 0)
1937  goto fail;
1938 
1939  last_filter = filt_out;
1940 
1941 /* Note: this macro adds a filter before the most recently added filter, so the
1942  * processing order of the filters is in reverse */
1943 #define INSERT_FILT(name, arg) do { \
1944  AVFilterContext *filt_ctx; \
1945  \
1946  ret = avfilter_graph_create_filter(&filt_ctx, \
1947  avfilter_get_by_name(name), \
1948  "ffplay_" name, arg, NULL, graph); \
1949  if (ret < 0) \
1950  goto fail; \
1951  \
1952  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1953  if (ret < 0) \
1954  goto fail; \
1955  \
1956  last_filter = filt_ctx; \
1957 } while (0)
1958 
1959  if (autorotate) {
1960  double theta = 0.0;
1961  int32_t *displaymatrix = NULL;
1963  if (sd)
1964  displaymatrix = (int32_t *)sd->data;
1965  if (!displaymatrix) {
1966  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1967  is->video_st->codecpar->nb_coded_side_data,
1969  if (psd)
1970  displaymatrix = (int32_t *)psd->data;
1971  }
1972  theta = get_rotation(displaymatrix);
1973 
1974  if (fabs(theta - 90) < 1.0) {
1975  INSERT_FILT("transpose", displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1976  } else if (fabs(theta - 180) < 1.0) {
1977  if (displaymatrix[0] < 0)
1978  INSERT_FILT("hflip", NULL);
1979  if (displaymatrix[4] < 0)
1980  INSERT_FILT("vflip", NULL);
1981  } else if (fabs(theta - 270) < 1.0) {
1982  INSERT_FILT("transpose", displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1983  } else if (fabs(theta) > 1.0) {
1984  char rotate_buf[64];
1985  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1986  INSERT_FILT("rotate", rotate_buf);
1987  } else {
1988  if (displaymatrix && displaymatrix[4] < 0)
1989  INSERT_FILT("vflip", NULL);
1990  }
1991  }
1992 
1993  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1994  goto fail;
1995 
1996  is->in_video_filter = filt_src;
1997  is->out_video_filter = filt_out;
1998 
1999 fail:
2000  av_freep(&par);
2001  return ret;
2002 }
2003 
2004 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2005 {
2006  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2007  char aresample_swr_opts[512] = "";
2008  const AVDictionaryEntry *e = NULL;
2009  AVBPrint bp;
2010  char asrc_args[256];
2011  int ret;
2012 
2013  avfilter_graph_free(&is->agraph);
2014  if (!(is->agraph = avfilter_graph_alloc()))
2015  return AVERROR(ENOMEM);
2016  is->agraph->nb_threads = filter_nbthreads;
2017 
2019 
2020  while ((e = av_dict_iterate(swr_opts, e)))
2021  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2022  if (strlen(aresample_swr_opts))
2023  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2024  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2025 
2026  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2027 
2028  ret = snprintf(asrc_args, sizeof(asrc_args),
2029  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2030  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2031  1, is->audio_filter_src.freq, bp.str);
2032 
2033  ret = avfilter_graph_create_filter(&filt_asrc,
2034  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2035  asrc_args, NULL, is->agraph);
2036  if (ret < 0)
2037  goto end;
2038 
2039  filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
2040  "ffplay_abuffersink");
2041  if (!filt_asink) {
2042  ret = AVERROR(ENOMEM);
2043  goto end;
2044  }
2045 
2046  if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0)
2047  goto end;
2048 
2049  if (force_output_format) {
2050  if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN,
2051  0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0)
2052  goto end;
2053  if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN,
2054  0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0)
2055  goto end;
2056  }
2057 
2058  ret = avfilter_init_dict(filt_asink, NULL);
2059  if (ret < 0)
2060  goto end;
2061 
2062  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2063  goto end;
2064 
2065  is->in_audio_filter = filt_asrc;
2066  is->out_audio_filter = filt_asink;
2067 
2068 end:
2069  if (ret < 0)
2070  avfilter_graph_free(&is->agraph);
2071  av_bprint_finalize(&bp, NULL);
2072 
2073  return ret;
2074 }
2075 
2076 static int audio_thread(void *arg)
2077 {
2078  VideoState *is = arg;
2079  AVFrame *frame = av_frame_alloc();
2080  Frame *af;
2081  int last_serial = -1;
2082  int reconfigure;
2083  int got_frame = 0;
2084  AVRational tb;
2085  int ret = 0;
2086 
2087  if (!frame)
2088  return AVERROR(ENOMEM);
2089 
2090  do {
2091  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2092  goto the_end;
2093 
2094  if (got_frame) {
2095  tb = (AVRational){1, frame->sample_rate};
2096 
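/* The audio filtergraph is built for one specific input format; if the
 * decoder starts producing frames with a different sample format, channel
 * layout or rate (or a seek changed the packet serial), the graph has to be
 * torn down and reconfigured for the new parameters. */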
2097  reconfigure =
2098  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2099  frame->format, frame->ch_layout.nb_channels) ||
2100  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2101  is->audio_filter_src.freq != frame->sample_rate ||
2102  is->auddec.pkt_serial != last_serial;
2103 
2104  if (reconfigure) {
2105  char buf1[1024], buf2[1024];
2106  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2107  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2108  av_log(NULL, AV_LOG_DEBUG,
2109  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2110  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2111  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2112 
2113  is->audio_filter_src.fmt = frame->format;
2114  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2115  if (ret < 0)
2116  goto the_end;
2117  is->audio_filter_src.freq = frame->sample_rate;
2118  last_serial = is->auddec.pkt_serial;
2119 
2120  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2121  goto the_end;
2122  }
2123 
2124  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2125  goto the_end;
2126 
2127  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2128  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2129  tb = av_buffersink_get_time_base(is->out_audio_filter);
2130  if (!(af = frame_queue_peek_writable(&is->sampq)))
2131  goto the_end;
2132 
2133  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2134  af->pos = fd ? fd->pkt_pos : -1;
2135  af->serial = is->auddec.pkt_serial;
2136  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2137 
2138  av_frame_move_ref(af->frame, frame);
2139  frame_queue_push(&is->sampq);
2140 
2141  if (is->audioq.serial != is->auddec.pkt_serial)
2142  break;
2143  }
2144  if (ret == AVERROR_EOF)
2145  is->auddec.finished = is->auddec.pkt_serial;
2146  }
2147  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2148  the_end:
2149  avfilter_graph_free(&is->agraph);
2150  av_frame_free(&frame);
2151  return ret;
2152 }
2153 
2154 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2155 {
2156  packet_queue_start(d->queue);
2157  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2158  if (!d->decoder_tid) {
2159  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2160  return AVERROR(ENOMEM);
2161  }
2162  return 0;
2163 }
2164 
2165 static int video_thread(void *arg)
2166 {
2167  VideoState *is = arg;
2168  AVFrame *frame = av_frame_alloc();
2169  double pts;
2170  double duration;
2171  int ret;
2172  AVRational tb = is->video_st->time_base;
2173  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2174 
2175  AVFilterGraph *graph = NULL;
2176  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2177  int last_w = 0;
2178  int last_h = 0;
2179  enum AVPixelFormat last_format = -2;
2180  int last_serial = -1;
2181  int last_vfilter_idx = 0;
2182 
2183  if (!frame)
2184  return AVERROR(ENOMEM);
2185 
2186  for (;;) {
2187  ret = get_video_frame(is, frame);
2188  if (ret < 0)
2189  goto the_end;
2190  if (!ret)
2191  continue;
2192 
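/* Same idea as on the audio side: the video filtergraph is specific to one
 * frame size/pixel format, so it is rebuilt whenever the decoded frame
 * properties, the packet serial or the selected -vf chain change. */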
2193  if ( last_w != frame->width
2194  || last_h != frame->height
2195  || last_format != frame->format
2196  || last_serial != is->viddec.pkt_serial
2197  || last_vfilter_idx != is->vfilter_idx) {
2198  av_log(NULL, AV_LOG_DEBUG,
2199  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2200  last_w, last_h,
2201  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2202  frame->width, frame->height,
2203  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2204  avfilter_graph_free(&graph);
2205  graph = avfilter_graph_alloc();
2206  if (!graph) {
2207  ret = AVERROR(ENOMEM);
2208  goto the_end;
2209  }
2210  graph->nb_threads = filter_nbthreads;
2211  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2212  SDL_Event event;
2213  event.type = FF_QUIT_EVENT;
2214  event.user.data1 = is;
2215  SDL_PushEvent(&event);
2216  goto the_end;
2217  }
2218  filt_in = is->in_video_filter;
2219  filt_out = is->out_video_filter;
2220  last_w = frame->width;
2221  last_h = frame->height;
2222  last_format = frame->format;
2223  last_serial = is->viddec.pkt_serial;
2224  last_vfilter_idx = is->vfilter_idx;
2225  frame_rate = av_buffersink_get_frame_rate(filt_out);
2226  }
2227 
2228  ret = av_buffersrc_add_frame(filt_in, frame);
2229  if (ret < 0)
2230  goto the_end;
2231 
2232  while (ret >= 0) {
2233  FrameData *fd;
2234 
2235  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2236 
2237  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2238  if (ret < 0) {
2239  if (ret == AVERROR_EOF)
2240  is->viddec.finished = is->viddec.pkt_serial;
2241  ret = 0;
2242  break;
2243  }
2244 
2245  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2246 
2247  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2248  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2249  is->frame_last_filter_delay = 0;
2250  tb = av_buffersink_get_time_base(filt_out);
2251  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2252  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2253  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2254  av_frame_unref(frame);
2255  if (is->videoq.serial != is->viddec.pkt_serial)
2256  break;
2257  }
2258 
2259  if (ret < 0)
2260  goto the_end;
2261  }
2262  the_end:
2263  avfilter_graph_free(&graph);
2264  av_frame_free(&frame);
2265  return 0;
2266 }
2267 
2268 static int subtitle_thread(void *arg)
2269 {
2270  VideoState *is = arg;
2271  Frame *sp;
2272  int got_subtitle;
2273  double pts;
2274 
2275  for (;;) {
2276  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2277  return 0;
2278 
2279  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2280  break;
2281 
2282  pts = 0;
2283 
2284  if (got_subtitle && sp->sub.format == 0) {
2285  if (sp->sub.pts != AV_NOPTS_VALUE)
2286  pts = sp->sub.pts / (double)AV_TIME_BASE;
2287  sp->pts = pts;
2288  sp->serial = is->subdec.pkt_serial;
2289  sp->width = is->subdec.avctx->width;
2290  sp->height = is->subdec.avctx->height;
2291  sp->uploaded = 0;
2292 
2293  /* now we can update the picture count */
2294  frame_queue_push(&is->subpq);
2295  } else if (got_subtitle) {
2296  avsubtitle_free(&sp->sub);
2297  }
2298  }
2299  return 0;
2300 }
2301 
2302 /* copy samples into the ring buffer used by the wave/RDFT display */
2303 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2304 {
2305  int size, len;
2306 
2307  size = samples_size / sizeof(short);
2308  while (size > 0) {
2309  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2310  if (len > size)
2311  len = size;
2312  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2313  samples += len;
2314  is->sample_array_index += len;
2315  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2316  is->sample_array_index = 0;
2317  size -= len;
2318  }
2319 }
2320 
2321 /* return the wanted number of samples to get better sync if sync_type is video
2322  * or external master clock */
2323 static int synchronize_audio(VideoState *is, int nb_samples)
2324 {
2325  int wanted_nb_samples = nb_samples;
2326 
2327  /* if not master, then we try to remove or add samples to correct the clock */
2328  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2329  double diff, avg_diff;
2330  int min_nb_samples, max_nb_samples;
2331 
2332  diff = get_clock(&is->audclk) - get_master_clock(is);
2333 
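/* audio_diff_cum is an exponentially weighted sum of recent A-V differences:
 * cum = diff + coef * cum, with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so
 * roughly the last AUDIO_DIFF_AVG_NB measurements dominate. Multiplying by
 * (1 - coef) turns the sum into a weighted average. A correction is applied
 * only after enough measurements were collected and only if the average error
 * exceeds audio_diff_threshold, and it is clamped to +/-10% of the frame
 * (SAMPLE_CORRECTION_PERCENT_MAX). */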
2334  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2335  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2336  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2337  /* not enough measures to have a correct estimate */
2338  is->audio_diff_avg_count++;
2339  } else {
2340  /* estimate the A-V difference */
2341  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2342 
2343  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2344  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2345  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2346  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2347  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2348  }
2349  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2350  diff, avg_diff, wanted_nb_samples - nb_samples,
2351  is->audio_clock, is->audio_diff_threshold);
2352  }
2353  } else {
2354  /* difference is too large: probably initial PTS errors, so
2355  reset the A-V filter */
2356  is->audio_diff_avg_count = 0;
2357  is->audio_diff_cum = 0;
2358  }
2359  }
2360 
2361  return wanted_nb_samples;
2362 }
2363 
2364 /**
2365  * Decode one audio frame and return its uncompressed size.
2366  *
2367  * The processed audio frame is decoded, converted if required, and
2368  * stored in is->audio_buf, with size in bytes given by the return
2369  * value.
2370  */
2371 static int audio_decode_frame(VideoState *is)
2372 {
2373  int data_size, resampled_data_size;
2374  av_unused double audio_clock0;
2375  int wanted_nb_samples;
2376  Frame *af;
2377 
2378  if (is->paused)
2379  return -1;
2380 
2381  do {
2382 #if defined(_WIN32)
2383  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2384  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2385  return -1;
2386  av_usleep (1000);
2387  }
2388 #endif
2389  if (!(af = frame_queue_peek_readable(&is->sampq)))
2390  return -1;
2391  frame_queue_next(&is->sampq);
2392  } while (af->serial != is->audioq.serial);
2393 
2394  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2395  af->frame->nb_samples,
2396  af->frame->format, 1);
2397 
2398  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2399 
2400  if (af->frame->format != is->audio_src.fmt ||
2401  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2402  af->frame->sample_rate != is->audio_src.freq ||
2403  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2404  int ret;
2405  swr_free(&is->swr_ctx);
2406  ret = swr_alloc_set_opts2(&is->swr_ctx,
2407  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2408  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2409  0, NULL);
2410  if (ret < 0 || swr_init(is->swr_ctx) < 0) {
2411  av_log(NULL, AV_LOG_ERROR,
2412  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2413  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2414  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2415  swr_free(&is->swr_ctx);
2416  return -1;
2417  }
2418  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2419  return -1;
2420  is->audio_src.freq = af->frame->sample_rate;
2421  is->audio_src.fmt = af->frame->format;
2422  }
2423 
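/* When resampling, swr_set_compensation() stretches or squeezes the audio
 * slightly: both the sample delta and the distance over which to apply it are
 * expressed in output-rate samples, so the converter spreads the correction
 * over the whole frame instead of abruptly dropping or duplicating samples. */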
2424  if (is->swr_ctx) {
2425  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2426  uint8_t **out = &is->audio_buf1;
2427  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2428  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2429  int len2;
2430  if (out_size < 0) {
2431  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2432  return -1;
2433  }
2434  if (wanted_nb_samples != af->frame->nb_samples) {
2435  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2436  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2437  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2438  return -1;
2439  }
2440  }
2441  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2442  if (!is->audio_buf1)
2443  return AVERROR(ENOMEM);
2444  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2445  if (len2 < 0) {
2446  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2447  return -1;
2448  }
2449  if (len2 == out_count) {
2450  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2451  if (swr_init(is->swr_ctx) < 0)
2452  swr_free(&is->swr_ctx);
2453  }
2454  is->audio_buf = is->audio_buf1;
2455  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2456  } else {
2457  is->audio_buf = af->frame->data[0];
2458  resampled_data_size = data_size;
2459  }
2460 
2461  audio_clock0 = is->audio_clock;
2462  /* update the audio clock with the pts */
2463  if (!isnan(af->pts))
2464  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2465  else
2466  is->audio_clock = NAN;
2467  is->audio_clock_serial = af->serial;
2468 #ifdef DEBUG
2469  {
2470  static double last_clock;
2471  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2472  is->audio_clock - last_clock,
2473  is->audio_clock, audio_clock0);
2474  last_clock = is->audio_clock;
2475  }
2476 #endif
2477  return resampled_data_size;
2478 }
2479 
2480 /* prepare a new audio buffer */
2481 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2482 {
2483  VideoState *is = opaque;
2484  int audio_size, len1;
2485 
2486  audio_callback_time = av_gettime_relative();
2487 
2488  while (len > 0) {
2489  if (is->audio_buf_index >= is->audio_buf_size) {
2490  audio_size = audio_decode_frame(is);
2491  if (audio_size < 0) {
2492  /* if error, just output silence */
2493  is->audio_buf = NULL;
2494  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2495  } else {
2496  if (is->show_mode != SHOW_MODE_VIDEO)
2497  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2498  is->audio_buf_size = audio_size;
2499  }
2500  is->audio_buf_index = 0;
2501  }
2502  len1 = is->audio_buf_size - is->audio_buf_index;
2503  if (len1 > len)
2504  len1 = len;
2505  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2506  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2507  else {
2508  memset(stream, 0, len1);
2509  if (!is->muted && is->audio_buf)
2510  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2511  }
2512  len -= len1;
2513  stream += len1;
2514  is->audio_buf_index += len1;
2515  }
2516  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2517  /* Let's assume the audio driver that is used by SDL has two periods. */
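 /* is->audio_clock is the pts at the end of the most recently decoded data;
  * subtracting the audio still queued for playback (two hardware buffers plus
  * the unwritten remainder of audio_buf), converted to seconds via
  * bytes_per_sec, gives the pts of the sample currently being heard. */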
2518  if (!isnan(is->audio_clock)) {
2519  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2520  sync_clock_to_slave(&is->extclk, &is->audclk);
2521  }
2522 }
2523 
2524 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2525 {
2526  SDL_AudioSpec wanted_spec, spec;
2527  const char *env;
2528  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2529  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2530  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2531  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2532 
2533  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2534  if (env) {
2535  wanted_nb_channels = atoi(env);
2536  av_channel_layout_uninit(wanted_channel_layout);
2537  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2538  }
2539  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2540  av_channel_layout_uninit(wanted_channel_layout);
2541  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2542  }
2543  wanted_nb_channels = wanted_channel_layout->nb_channels;
2544  wanted_spec.channels = wanted_nb_channels;
2545  wanted_spec.freq = wanted_sample_rate;
2546  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2547  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2548  return -1;
2549  }
2550  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2551  next_sample_rate_idx--;
2552  wanted_spec.format = AUDIO_S16SYS;
2553  wanted_spec.silence = 0;
2554  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2555  wanted_spec.callback = sdl_audio_callback;
2556  wanted_spec.userdata = opaque;
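 /* SDL may refuse the requested channel count or sample rate; on failure,
  * walk down the next_nb_channels[] table (e.g. 7 -> 6 -> 4 -> 2 -> 1) and,
  * once the channel options are exhausted, retry with the next lower standard
  * sample rate until some combination is accepted. */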
2557  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2558  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2559  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2560  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2561  if (!wanted_spec.channels) {
2562  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2563  wanted_spec.channels = wanted_nb_channels;
2564  if (!wanted_spec.freq) {
2565  av_log(NULL, AV_LOG_ERROR,
2566  "No more combinations to try, audio open failed\n");
2567  return -1;
2568  }
2569  }
2570  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2571  }
2572  if (spec.format != AUDIO_S16SYS) {
2573  av_log(NULL, AV_LOG_ERROR,
2574  "SDL advised audio format %d is not supported!\n", spec.format);
2575  return -1;
2576  }
2577  if (spec.channels != wanted_spec.channels) {
2578  av_channel_layout_uninit(wanted_channel_layout);
2579  av_channel_layout_default(wanted_channel_layout, spec.channels);
2580  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2581  av_log(NULL, AV_LOG_ERROR,
2582  "SDL advised channel count %d is not supported!\n", spec.channels);
2583  return -1;
2584  }
2585  }
2586 
2587  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2588  audio_hw_params->freq = spec.freq;
2589  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2590  return -1;
2591  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2592  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2593  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2594  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2595  return -1;
2596  }
2597  return spec.size;
2598 }
2599 
2600 static int create_hwaccel(AVBufferRef **device_ctx)
2601 {
2602  enum AVHWDeviceType type;
2603  int ret;
2604  AVBufferRef *vk_dev;
2605 
2606  *device_ctx = NULL;
2607 
2608  if (!hwaccel)
2609  return 0;
2610 
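/* Prefer deriving the decoder's hardware device from the Vulkan renderer's
 * device, so decoder and renderer share one GPU context; if derivation is not
 * supported, fall back to creating a standalone device of the requested type. */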
2611  type = av_hwdevice_find_type_by_name(hwaccel);
2612  if (type == AV_HWDEVICE_TYPE_NONE)
2613  return AVERROR(ENOTSUP);
2614 
2615  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2616  if (ret < 0)
2617  return ret;
2618 
2619  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2620  if (!ret)
2621  return 0;
2622 
2623  if (ret != AVERROR(ENOSYS))
2624  return ret;
2625 
2626  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2627  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2628  return ret;
2629 }
2630 
2631 /* open a given stream. Return 0 if OK */
2632 static int stream_component_open(VideoState *is, int stream_index)
2633 {
2634  AVFormatContext *ic = is->ic;
2635  AVCodecContext *avctx;
2636  const AVCodec *codec;
2637  const char *forced_codec_name = NULL;
2638  AVDictionary *opts = NULL;
2639  int sample_rate;
2640  AVChannelLayout ch_layout = { 0 };
2641  int ret = 0;
2642  int stream_lowres = lowres;
2643 
2644  if (stream_index < 0 || stream_index >= ic->nb_streams)
2645  return -1;
2646 
2647  avctx = avcodec_alloc_context3(NULL);
2648  if (!avctx)
2649  return AVERROR(ENOMEM);
2650 
2651  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2652  if (ret < 0)
2653  goto fail;
2654  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2655 
2656  codec = avcodec_find_decoder(avctx->codec_id);
2657 
2658  switch(avctx->codec_type){
2659  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2660  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2661  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2662  }
2663  if (forced_codec_name)
2664  codec = avcodec_find_decoder_by_name(forced_codec_name);
2665  if (!codec) {
2666  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2667  "No codec could be found with name '%s'\n", forced_codec_name);
2668  else av_log(NULL, AV_LOG_WARNING,
2669  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2670  ret = AVERROR(EINVAL);
2671  goto fail;
2672  }
2673 
2674  avctx->codec_id = codec->id;
2675  if (stream_lowres > codec->max_lowres) {
2676  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2677  codec->max_lowres);
2678  stream_lowres = codec->max_lowres;
2679  }
2680  avctx->lowres = stream_lowres;
2681 
2682  if (fast)
2683  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2684 
2685  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2686  ic->streams[stream_index], codec, &opts, NULL);
2687  if (ret < 0)
2688  goto fail;
2689 
2690  if (!av_dict_get(opts, "threads", NULL, 0))
2691  av_dict_set(&opts, "threads", "auto", 0);
2692  if (stream_lowres)
2693  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2694 
2695  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2696 
2697  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2698  ret = create_hwaccel(&avctx->hw_device_ctx);
2699  if (ret < 0)
2700  goto fail;
2701  }
2702 
2703  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2704  goto fail;
2705  }
2706  ret = check_avoptions(opts);
2707  if (ret < 0)
2708  goto fail;
2709 
2710  is->eof = 0;
2711  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2712  switch (avctx->codec_type) {
2713  case AVMEDIA_TYPE_AUDIO:
2714  {
2715  AVFilterContext *sink;
2716 
2717  is->audio_filter_src.freq = avctx->sample_rate;
2718  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2719  if (ret < 0)
2720  goto fail;
2721  is->audio_filter_src.fmt = avctx->sample_fmt;
2722  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2723  goto fail;
2724  sink = is->out_audio_filter;
2725  sample_rate = av_buffersink_get_sample_rate(sink);
2726  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2727  if (ret < 0)
2728  goto fail;
2729  }
2730 
2731  /* prepare audio output */
2732  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2733  goto fail;
2734  is->audio_hw_buf_size = ret;
2735  is->audio_src = is->audio_tgt;
2736  is->audio_buf_size = 0;
2737  is->audio_buf_index = 0;
2738 
2739  /* init averaging filter */
2740  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2741  is->audio_diff_avg_count = 0;
2742  /* since we do not have a precise enough audio FIFO fullness measure,
2743  we correct audio sync only if the error is larger than this threshold */
2744  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2745 
2746  is->audio_stream = stream_index;
2747  is->audio_st = ic->streams[stream_index];
2748 
2749  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2750  goto fail;
2751  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2752  is->auddec.start_pts = is->audio_st->start_time;
2753  is->auddec.start_pts_tb = is->audio_st->time_base;
2754  }
2755  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2756  goto out;
2757  SDL_PauseAudioDevice(audio_dev, 0);
2758  break;
2759  case AVMEDIA_TYPE_VIDEO:
2760  is->video_stream = stream_index;
2761  is->video_st = ic->streams[stream_index];
2762 
2763  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2764  goto fail;
2765  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2766  goto out;
2767  is->queue_attachments_req = 1;
2768  break;
2769  case AVMEDIA_TYPE_SUBTITLE:
2770  is->subtitle_stream = stream_index;
2771  is->subtitle_st = ic->streams[stream_index];
2772 
2773  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2774  goto fail;
2775  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2776  goto out;
2777  break;
2778  default:
2779  break;
2780  }
2781  goto out;
2782 
2783 fail:
2784  avcodec_free_context(&avctx);
2785 out:
2786  av_channel_layout_uninit(&ch_layout);
2787  av_dict_free(&opts);
2788 
2789  return ret;
2790 }
2791 
2792 static int decode_interrupt_cb(void *ctx)
2793 {
2794  VideoState *is = ctx;
2795  return is->abort_request;
2796 }
2797 
2798 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2799  return stream_id < 0 ||
2800  queue->abort_request ||
2801  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2802  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2803 }
2804 
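/* Heuristic: treat RTP/RTSP/SDP inputs and rtp:/udp: URLs as realtime
 * sources; this later turns on unbounded input buffering (see infinite_buffer
 * in read_thread). */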
2805 static int is_realtime(AVFormatContext *s)
2806 {
2807  if( !strcmp(s->iformat->name, "rtp")
2808  || !strcmp(s->iformat->name, "rtsp")
2809  || !strcmp(s->iformat->name, "sdp")
2810  )
2811  return 1;
2812 
2813  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2814  || !strncmp(s->url, "udp:", 4)
2815  )
2816  )
2817  return 1;
2818  return 0;
2819 }
2820 
2821 /* this thread gets the stream from the disk or the network */
2822 static int read_thread(void *arg)
2823 {
2824  VideoState *is = arg;
2825  AVFormatContext *ic = NULL;
2826  int err, i, ret;
2827  int st_index[AVMEDIA_TYPE_NB];
2828  AVPacket *pkt = NULL;
2829  int64_t stream_start_time;
2830  int pkt_in_play_range = 0;
2831  const AVDictionaryEntry *t;
2832  SDL_mutex *wait_mutex = SDL_CreateMutex();
2833  int scan_all_pmts_set = 0;
2834  int64_t pkt_ts;
2835 
2836  if (!wait_mutex) {
2837  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2838  ret = AVERROR(ENOMEM);
2839  goto fail;
2840  }
2841 
2842  memset(st_index, -1, sizeof(st_index));
2843  is->eof = 0;
2844 
2845  pkt = av_packet_alloc();
2846  if (!pkt) {
2847  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2848  ret = AVERROR(ENOMEM);
2849  goto fail;
2850  }
2851  ic = avformat_alloc_context();
2852  if (!ic) {
2853  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2854  ret = AVERROR(ENOMEM);
2855  goto fail;
2856  }
2857  ic->interrupt_callback.callback = decode_interrupt_cb;
2858  ic->interrupt_callback.opaque = is;
2859  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2860  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2861  scan_all_pmts_set = 1;
2862  }
2863  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2864  if (err < 0) {
2865  print_error(is->filename, err);
2866  ret = -1;
2867  goto fail;
2868  }
2869  if (scan_all_pmts_set)
2870  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2871  remove_avoptions(&format_opts, codec_opts);
2872 
2873  ret = check_avoptions(format_opts);
2874  if (ret < 0)
2875  goto fail;
2876  is->ic = ic;
2877 
2878  if (genpts)
2879  ic->flags |= AVFMT_FLAG_GENPTS;
2880 
2881  if (find_stream_info) {
2882  AVDictionary **opts;
2883  int orig_nb_streams = ic->nb_streams;
2884 
2885  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2886  if (err < 0) {
2887  av_log(NULL, AV_LOG_ERROR,
2888  "Error setting up avformat_find_stream_info() options\n");
2889  ret = err;
2890  goto fail;
2891  }
2892 
2893  err = avformat_find_stream_info(ic, opts);
2894 
2895  for (i = 0; i < orig_nb_streams; i++)
2896  av_dict_free(&opts[i]);
2897  av_freep(&opts);
2898 
2899  if (err < 0) {
2900  av_log(NULL, AV_LOG_WARNING,
2901  "%s: could not find codec parameters\n", is->filename);
2902  ret = -1;
2903  goto fail;
2904  }
2905  }
2906 
2907  if (ic->pb)
2908  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2909 
2910  if (seek_by_bytes < 0)
2911  seek_by_bytes =
2912  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2913  strcmp("ogg", ic->iformat->name);
2914 
2915  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2916 
2917  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2918  window_title = av_asprintf("%s - %s", t->value, input_filename);
2919 
2920  /* if seeking requested, we execute it */
2921  if (start_time != AV_NOPTS_VALUE) {
2922  int64_t timestamp;
2923 
2924  timestamp = start_time;
2925  /* add the stream start time */
2926  if (ic->start_time != AV_NOPTS_VALUE)
2927  timestamp += ic->start_time;
2928  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2929  if (ret < 0) {
2930  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2931  is->filename, (double)timestamp / AV_TIME_BASE);
2932  }
2933  }
2934 
2935  is->realtime = is_realtime(ic);
2936 
2937  if (show_status)
2938  av_dump_format(ic, 0, is->filename, 0);
2939 
2940  for (i = 0; i < ic->nb_streams; i++) {
2941  AVStream *st = ic->streams[i];
2942  enum AVMediaType type = st->codecpar->codec_type;
2943  st->discard = AVDISCARD_ALL;
2944  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2945  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2946  st_index[type] = i;
2947  }
2948  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2949  if (wanted_stream_spec[i] && st_index[i] == -1) {
2950  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2951  st_index[i] = INT_MAX;
2952  }
2953  }
2954 
2955  if (!video_disable)
2956  st_index[AVMEDIA_TYPE_VIDEO] =
2957  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2958  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2959  if (!audio_disable)
2960  st_index[AVMEDIA_TYPE_AUDIO] =
2961  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2962  st_index[AVMEDIA_TYPE_AUDIO],
2963  st_index[AVMEDIA_TYPE_VIDEO],
2964  NULL, 0);
2965  if (!subtitle_disable)
2966  st_index[AVMEDIA_TYPE_SUBTITLE] =
2967  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2968  st_index[AVMEDIA_TYPE_SUBTITLE],
2969  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2970  st_index[AVMEDIA_TYPE_AUDIO] :
2971  st_index[AVMEDIA_TYPE_VIDEO]),
2972  NULL, 0);
2973 
2974  is->show_mode = show_mode;
2975  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2976  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2977  AVCodecParameters *codecpar = st->codecpar;
2978  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2979  if (codecpar->width)
2980  set_default_window_size(codecpar->width, codecpar->height, sar);
2981  }
2982 
2983  /* open the streams */
2984  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2985  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2986  }
2987 
2988  ret = -1;
2989  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2990  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2991  }
2992  if (is->show_mode == SHOW_MODE_NONE)
2993  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2994 
2995  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2996  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2997  }
2998 
2999  if (is->video_stream < 0 && is->audio_stream < 0) {
3000  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3001  is->filename);
3002  ret = -1;
3003  goto fail;
3004  }
3005 
3006  if (infinite_buffer < 0 && is->realtime)
3007  infinite_buffer = 1;
3008 
3009  for (;;) {
3010  if (is->abort_request)
3011  break;
3012  if (is->paused != is->last_paused) {
3013  is->last_paused = is->paused;
3014  if (is->paused)
3015  is->read_pause_return = av_read_pause(ic);
3016  else
3017  av_read_play(ic);
3018  }
3019 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3020  if (is->paused &&
3021  (!strcmp(ic->iformat->name, "rtsp") ||
3022  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3023  /* wait 10 ms to avoid trying to get another packet */
3024  /* XXX: horrible */
3025  SDL_Delay(10);
3026  continue;
3027  }
3028 #endif
3029  if (is->seek_req) {
3030  int64_t seek_target = is->seek_pos;
3031  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3032  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3033 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3034 // of the seek_pos/seek_rel variables
3035 
3036  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3037  if (ret < 0) {
3038  av_log(NULL, AV_LOG_ERROR,
3039  "%s: error while seeking\n", is->ic->url);
3040  } else {
3041  if (is->audio_stream >= 0)
3042  packet_queue_flush(&is->audioq);
3043  if (is->subtitle_stream >= 0)
3044  packet_queue_flush(&is->subtitleq);
3045  if (is->video_stream >= 0)
3046  packet_queue_flush(&is->videoq);
3047  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3048  set_clock(&is->extclk, NAN, 0);
3049  } else {
3050  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3051  }
3052  }
3053  is->seek_req = 0;
3054  is->queue_attachments_req = 1;
3055  is->eof = 0;
3056  if (is->paused)
3057  step_to_next_frame(is);
3058  }
3059  if (is->queue_attachments_req) {
3060  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3061  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3062  goto fail;
3063  packet_queue_put(&is->videoq, pkt);
3064  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3065  }
3066  is->queue_attachments_req = 0;
3067  }
3068 
3069  /* if the queues are full, no need to read more */
3070  if (infinite_buffer<1 &&
3071  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3072  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3073  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3074  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3075  /* wait 10 ms */
3076  SDL_LockMutex(wait_mutex);
3077  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3078  SDL_UnlockMutex(wait_mutex);
3079  continue;
3080  }
3081  if (!is->paused &&
3082  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3083  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3084  if (loop != 1 && (!loop || --loop)) {
3085  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3086  } else if (autoexit) {
3087  ret = AVERROR_EOF;
3088  goto fail;
3089  }
3090  }
3091  ret = av_read_frame(ic, pkt);
3092  if (ret < 0) {
3093  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3094  if (is->video_stream >= 0)
3095  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3096  if (is->audio_stream >= 0)
3097  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3098  if (is->subtitle_stream >= 0)
3099  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3100  is->eof = 1;
3101  }
3102  if (ic->pb && ic->pb->error) {
3103  if (autoexit)
3104  goto fail;
3105  else
3106  break;
3107  }
3108  SDL_LockMutex(wait_mutex);
3109  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3110  SDL_UnlockMutex(wait_mutex);
3111  continue;
3112  } else {
3113  is->eof = 0;
3114  }
3115  /* check if packet is in play range specified by user, then queue, otherwise discard */
3116  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3117  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3118  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3119  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3120  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3121  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3122  <= ((double)duration / 1000000);
3123  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3124  packet_queue_put(&is->audioq, pkt);
3125  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3126  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3127  packet_queue_put(&is->videoq, pkt);
3128  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3129  packet_queue_put(&is->subtitleq, pkt);
3130  } else {
3131  av_packet_unref(pkt);
3132  }
3133  }
3134 
3135  ret = 0;
3136  fail:
3137  if (ic && !is->ic)
3138  avformat_close_input(&ic);
3139 
3140  av_packet_free(&pkt);
3141  if (ret != 0) {
3142  SDL_Event event;
3143 
3144  event.type = FF_QUIT_EVENT;
3145  event.user.data1 = is;
3146  SDL_PushEvent(&event);
3147  }
3148  SDL_DestroyMutex(wait_mutex);
3149  return 0;
3150 }
3151 
3152 static VideoState *stream_open(const char *filename,
3153  const AVInputFormat *iformat)
3154 {
3155  VideoState *is;
3156 
3157  is = av_mallocz(sizeof(VideoState));
3158  if (!is)
3159  return NULL;
3160  is->last_video_stream = is->video_stream = -1;
3161  is->last_audio_stream = is->audio_stream = -1;
3162  is->last_subtitle_stream = is->subtitle_stream = -1;
3163  is->filename = av_strdup(filename);
3164  if (!is->filename)
3165  goto fail;
3166  is->iformat = iformat;
3167  is->ytop = 0;
3168  is->xleft = 0;
3169 
3170  /* start video display */
3171  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3172  goto fail;
3173  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3174  goto fail;
3175  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3176  goto fail;
3177 
3178  if (packet_queue_init(&is->videoq) < 0 ||
3179  packet_queue_init(&is->audioq) < 0 ||
3180  packet_queue_init(&is->subtitleq) < 0)
3181  goto fail;
3182 
3183  if (!(is->continue_read_thread = SDL_CreateCond())) {
3184  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3185  goto fail;
3186  }
3187 
3188  init_clock(&is->vidclk, &is->videoq.serial);
3189  init_clock(&is->audclk, &is->audioq.serial);
3190  init_clock(&is->extclk, &is->extclk.serial);
3191  is->audio_clock_serial = -1;
3192  if (startup_volume < 0)
3193  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3194  if (startup_volume > 100)
3195  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3196  startup_volume = av_clip(startup_volume, 0, 100);
3197  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3198  is->audio_volume = startup_volume;
3199  is->muted = 0;
3200  is->av_sync_type = av_sync_type;
3201  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3202  if (!is->read_tid) {
3203  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3204 fail:
3205  stream_close(is);
3206  return NULL;
3207  }
3208  return is;
3209 }
3210 
3211 static void stream_cycle_channel(VideoState *is, int codec_type)
3212 {
3213  AVFormatContext *ic = is->ic;
3214  int start_index, stream_index;
3215  int old_index;
3216  AVStream *st;
3217  AVProgram *p = NULL;
3218  int nb_streams = is->ic->nb_streams;
3219 
3220  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3221  start_index = is->last_video_stream;
3222  old_index = is->video_stream;
3223  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3224  start_index = is->last_audio_stream;
3225  old_index = is->audio_stream;
3226  } else {
3227  start_index = is->last_subtitle_stream;
3228  old_index = is->subtitle_stream;
3229  }
3230  stream_index = start_index;
3231 
3232  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3233  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3234  if (p) {
3235  nb_streams = p->nb_stream_indexes;
3236  for (start_index = 0; start_index < nb_streams; start_index++)
3237  if (p->stream_index[start_index] == stream_index)
3238  break;
3239  if (start_index == nb_streams)
3240  start_index = -1;
3241  stream_index = start_index;
3242  }
3243  }
3244 
3245  for (;;) {
3246  if (++stream_index >= nb_streams)
3247  {
3248  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3249  {
3250  stream_index = -1;
3251  is->last_subtitle_stream = -1;
3252  goto the_end;
3253  }
3254  if (start_index == -1)
3255  return;
3256  stream_index = 0;
3257  }
3258  if (stream_index == start_index)
3259  return;
3260  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3261  if (st->codecpar->codec_type == codec_type) {
3262  /* check that parameters are OK */
3263  switch (codec_type) {
3264  case AVMEDIA_TYPE_AUDIO:
3265  if (st->codecpar->sample_rate != 0 &&
3266  st->codecpar->ch_layout.nb_channels != 0)
3267  goto the_end;
3268  break;
3269  case AVMEDIA_TYPE_VIDEO:
3270  case AVMEDIA_TYPE_SUBTITLE:
3271  goto the_end;
3272  default:
3273  break;
3274  }
3275  }
3276  }
3277  the_end:
3278  if (p && stream_index != -1)
3279  stream_index = p->stream_index[stream_index];
3280  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3281  av_get_media_type_string(codec_type),
3282  old_index,
3283  stream_index);
3284 
3285  stream_component_close(is, old_index);
3286  stream_component_open(is, stream_index);
3287 }
3288 
3289 
3290 static void toggle_full_screen(VideoState *is)
3291 {
3292  is_full_screen = !is_full_screen;
3293  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3294 }
3295 
3296 static void toggle_audio_display(VideoState *is)
3297 {
3298  int next = is->show_mode;
3299  do {
3300  next = (next + 1) % SHOW_MODE_NB;
3301  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3302  if (is->show_mode != next) {
3303  is->force_refresh = 1;
3304  is->show_mode = next;
3305  }
3306 }
3307 
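/* Pump SDL events and, while none are pending, keep the display up to date:
 * hide the mouse cursor after a period of inactivity, sleep for at most
 * remaining_time, and let video_refresh() decide when the next frame is due. */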
3308 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3309  double remaining_time = 0.0;
3310  SDL_PumpEvents();
3311  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3312  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3313  SDL_ShowCursor(0);
3314  cursor_hidden = 1;
3315  }
3316  if (remaining_time > 0.0)
3317  av_usleep((int64_t)(remaining_time * 1000000.0));
3318  remaining_time = REFRESH_RATE;
3319  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3320  video_refresh(is, &remaining_time);
3321  SDL_PumpEvents();
3322  }
3323 }
3324 
3325 static void seek_chapter(VideoState *is, int incr)
3326 {
3327  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3328  int i;
3329 
3330  if (!is->ic->nb_chapters)
3331  return;
3332 
3333  /* find the current chapter */
3334  for (i = 0; i < is->ic->nb_chapters; i++) {
3335  AVChapter *ch = is->ic->chapters[i];
3336  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3337  i--;
3338  break;
3339  }
3340  }
3341 
3342  i += incr;
3343  i = FFMAX(i, 0);
3344  if (i >= is->ic->nb_chapters)
3345  return;
3346 
3347  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3348  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3349  AV_TIME_BASE_Q), 0, 0);
3350 }
3351 
3352 /* handle an event sent by the GUI */
3353 static void event_loop(VideoState *cur_stream)
3354 {
3355  SDL_Event event;
3356  double incr, pos, frac;
3357 
3358  for (;;) {
3359  double x;
3360  refresh_loop_wait_event(cur_stream, &event);
3361  switch (event.type) {
3362  case SDL_KEYDOWN:
3363  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3364  do_exit(cur_stream);
3365  break;
3366  }
3367  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3368  if (!cur_stream->width)
3369  continue;
3370  switch (event.key.keysym.sym) {
3371  case SDLK_f:
3372  toggle_full_screen(cur_stream);
3373  cur_stream->force_refresh = 1;
3374  break;
3375  case SDLK_p:
3376  case SDLK_SPACE:
3377  toggle_pause(cur_stream);
3378  break;
3379  case SDLK_m:
3380  toggle_mute(cur_stream);
3381  break;
3382  case SDLK_KP_MULTIPLY:
3383  case SDLK_0:
3384  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3385  break;
3386  case SDLK_KP_DIVIDE:
3387  case SDLK_9:
3388  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3389  break;
3390  case SDLK_s: // S: Step to next frame
3391  step_to_next_frame(cur_stream);
3392  break;
3393  case SDLK_a:
3394  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3395  break;
3396  case SDLK_v:
3397  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3398  break;
3399  case SDLK_c:
3400  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3401  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3402  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3403  break;
3404  case SDLK_t:
3405  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3406  break;
3407  case SDLK_w:
3408  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3409  if (++cur_stream->vfilter_idx >= nb_vfilters)
3410  cur_stream->vfilter_idx = 0;
3411  } else {
3412  cur_stream->vfilter_idx = 0;
3413  toggle_audio_display(cur_stream);
3414  }
3415  break;
3416  case SDLK_PAGEUP:
3417  if (cur_stream->ic->nb_chapters <= 1) {
3418  incr = 600.0;
3419  goto do_seek;
3420  }
3421  seek_chapter(cur_stream, 1);
3422  break;
3423  case SDLK_PAGEDOWN:
3424  if (cur_stream->ic->nb_chapters <= 1) {
3425  incr = -600.0;
3426  goto do_seek;
3427  }
3428  seek_chapter(cur_stream, -1);
3429  break;
3430  case SDLK_LEFT:
3431  incr = seek_interval ? -seek_interval : -10.0;
3432  goto do_seek;
3433  case SDLK_RIGHT:
3434  incr = seek_interval ? seek_interval : 10.0;
3435  goto do_seek;
3436  case SDLK_UP:
3437  incr = 60.0;
3438  goto do_seek;
3439  case SDLK_DOWN:
3440  incr = -60.0;
3441  do_seek:
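 /* When seeking by bytes there is no reliable timebase, so the keyboard
  * increment (in seconds) is converted to an approximate byte offset using
  * the container bit rate, or a rough 180000 bytes/s default when unknown. */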
3442  if (seek_by_bytes) {
3443  pos = -1;
3444  if (pos < 0 && cur_stream->video_stream >= 0)
3445  pos = frame_queue_last_pos(&cur_stream->pictq);
3446  if (pos < 0 && cur_stream->audio_stream >= 0)
3447  pos = frame_queue_last_pos(&cur_stream->sampq);
3448  if (pos < 0)
3449  pos = avio_tell(cur_stream->ic->pb);
3450  if (cur_stream->ic->bit_rate)
3451  incr *= cur_stream->ic->bit_rate / 8.0;
3452  else
3453  incr *= 180000.0;
3454  pos += incr;
3455  stream_seek(cur_stream, pos, incr, 1);
3456  } else {
3457  pos = get_master_clock(cur_stream);
3458  if (isnan(pos))
3459  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3460  pos += incr;
3461  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3462  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3463  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3464  }
3465  break;
3466  default:
3467  break;
3468  }
3469  break;
3470  case SDL_MOUSEBUTTONDOWN:
3471  if (exit_on_mousedown) {
3472  do_exit(cur_stream);
3473  break;
3474  }
3475  if (event.button.button == SDL_BUTTON_LEFT) {
3476  static int64_t last_mouse_left_click = 0;
3477  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3478  toggle_full_screen(cur_stream);
3479  cur_stream->force_refresh = 1;
3480  last_mouse_left_click = 0;
3481  } else {
3482  last_mouse_left_click = av_gettime_relative();
3483  }
3484  }
3485  case SDL_MOUSEMOTION:
3486  if (cursor_hidden) {
3487  SDL_ShowCursor(1);
3488  cursor_hidden = 0;
3489  }
3490  cursor_last_shown = av_gettime_relative();
3491  if (event.type == SDL_MOUSEBUTTONDOWN) {
3492  if (event.button.button != SDL_BUTTON_RIGHT)
3493  break;
3494  x = event.button.x;
3495  } else {
3496  if (!(event.motion.state & SDL_BUTTON_RMASK))
3497  break;
3498  x = event.motion.x;
3499  }
3500  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3501  uint64_t size = avio_size(cur_stream->ic->pb);
3502  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3503  } else {
3504  int64_t ts;
3505  int ns, hh, mm, ss;
3506  int tns, thh, tmm, tss;
3507  tns = cur_stream->ic->duration / 1000000LL;
3508  thh = tns / 3600;
3509  tmm = (tns % 3600) / 60;
3510  tss = (tns % 60);
3511  frac = x / cur_stream->width;
3512  ns = frac * tns;
3513  hh = ns / 3600;
3514  mm = (ns % 3600) / 60;
3515  ss = (ns % 60);
3516  av_log(NULL, AV_LOG_INFO,
3517  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3518  hh, mm, ss, thh, tmm, tss);
3519  ts = frac * cur_stream->ic->duration;
3520  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3521  ts += cur_stream->ic->start_time;
3522  stream_seek(cur_stream, ts, 0, 0);
3523  }
3524  break;
3525  case SDL_WINDOWEVENT:
3526  switch (event.window.event) {
3527  case SDL_WINDOWEVENT_SIZE_CHANGED:
3528  screen_width = cur_stream->width = event.window.data1;
3529  screen_height = cur_stream->height = event.window.data2;
3530  if (cur_stream->vis_texture) {
3531  SDL_DestroyTexture(cur_stream->vis_texture);
3532  cur_stream->vis_texture = NULL;
3533  }
3534  if (vk_renderer)
3535  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3536  case SDL_WINDOWEVENT_EXPOSED:
3537  cur_stream->force_refresh = 1;
3538  }
3539  break;
3540  case SDL_QUIT:
3541  case FF_QUIT_EVENT:
3542  do_exit(cur_stream);
3543  break;
3544  default:
3545  break;
3546  }
3547  }
3548 }
3549 
3550 static int opt_width(void *optctx, const char *opt, const char *arg)
3551 {
3552  double num;
3553  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3554  if (ret < 0)
3555  return ret;
3556 
3557  screen_width = num;
3558  return 0;
3559 }
3560 
3561 static int opt_height(void *optctx, const char *opt, const char *arg)
3562 {
3563  double num;
3564  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3565  if (ret < 0)
3566  return ret;
3567 
3568  screen_height = num;
3569  return 0;
3570 }
3571 
3572 static int opt_format(void *optctx, const char *opt, const char *arg)
3573 {
3574  file_iformat = av_find_input_format(arg);
3575  if (!file_iformat) {
3576  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3577  return AVERROR(EINVAL);
3578  }
3579  return 0;
3580 }
3581 
3582 static int opt_sync(void *optctx, const char *opt, const char *arg)
3583 {
3584  if (!strcmp(arg, "audio"))
3585  av_sync_type = AV_SYNC_AUDIO_MASTER;
3586  else if (!strcmp(arg, "video"))
3587  av_sync_type = AV_SYNC_VIDEO_MASTER;
3588  else if (!strcmp(arg, "ext"))
3589  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3590  else {
3591  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3592  exit(1);
3593  }
3594  return 0;
3595 }
3596 
3597 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3598 {
3599  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3600  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3601  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3602 
3603  if (show_mode == SHOW_MODE_NONE) {
3604  double num;
3605  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3606  if (ret < 0)
3607  return ret;
3608  show_mode = num;
3609  }
3610  return 0;
3611 }
3612 
3613 static int opt_input_file(void *optctx, const char *filename)
3614 {
3615  if (input_filename) {
3616  av_log(NULL, AV_LOG_FATAL,
3617  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3618  filename, input_filename);
3619  return AVERROR(EINVAL);
3620  }
3621  if (!strcmp(filename, "-"))
3622  filename = "fd:";
3623  input_filename = av_strdup(filename);
3624  if (!input_filename)
3625  return AVERROR(ENOMEM);
3626 
3627  return 0;
3628 }
3629 
3630 static int opt_codec(void *optctx, const char *opt, const char *arg)
3631 {
3632  const char *spec = strchr(opt, ':');
3633  const char **name;
3634  if (!spec) {
3635  av_log(NULL, AV_LOG_ERROR,
3636  "No media specifier was specified in '%s' in option '%s'\n",
3637  arg, opt);
3638  return AVERROR(EINVAL);
3639  }
3640  spec++;
3641 
3642  switch (spec[0]) {
3643  case 'a' : name = &audio_codec_name; break;
3644  case 's' : name = &subtitle_codec_name; break;
3645  case 'v' : name = &video_codec_name; break;
3646  default:
3647  av_log(NULL, AV_LOG_ERROR,
3648  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3649  return AVERROR(EINVAL);
3650  }
3651 
3652  av_freep(name);
3653  *name = av_strdup(arg);
3654  return *name ? 0 : AVERROR(ENOMEM);
3655 }
3656 
3657 static int dummy;
3658 
3659 static const OptionDef options[] = {
3660  CMDUTILS_COMMON_OPTIONS
3661  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3662  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3663  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3664  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3665  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3666  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3667  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3668  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3669  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3670  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3671  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3672  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3673  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3674  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3675  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3676  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3677  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3678  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3679  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3680  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3681  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3682  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3683  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3684  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3685  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3686  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3687  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3688  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3689  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3690  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3691  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3692  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3693  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3694  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3695  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3696  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3697  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3698  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3699  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3700  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3701  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3702  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3703  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3704  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3705  "read and decode the streams to fill missing information with heuristics" },
3706  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3707  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3708  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3709  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3710  { NULL, },
3711 };
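
Each entry in the table above binds an option name either to a variable that the option parser fills in directly (the OPT_TYPE_BOOL/INT/FLOAT/STRING entries) or to a callback invoked with the option's argument (OPT_TYPE_FUNC with OPT_FUNC_ARG). The following is a minimal, self-contained sketch of that table-dispatch idea only; the demo_* names and the struct layout are illustrative and are not the cmdutils.h API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative stand-in for an OptionDef-style table entry. */
    enum demo_type { DEMO_BOOL, DEMO_INT, DEMO_FUNC };

    struct demo_opt {
        const char *name;
        enum demo_type type;
        void *dst;                    /* variable to set, or ...           */
        int (*func)(const char *arg); /* ... callback taking the argument  */
    };

    static int demo_set_format(const char *arg)
    {
        printf("force format: %s\n", arg);
        return 0;
    }

    /* Look up "name" in the NULL-terminated table and apply it. */
    static int demo_apply(const struct demo_opt *tab, const char *name, const char *arg)
    {
        for (; tab->name; tab++) {
            if (strcmp(tab->name, name))
                continue;
            switch (tab->type) {
            case DEMO_BOOL: *(int *)tab->dst = 1;         return 0;
            case DEMO_INT:  *(int *)tab->dst = atoi(arg); return 0;
            case DEMO_FUNC: return tab->func(arg);
            }
        }
        return -1; /* unknown option */
    }

    int main(void)
    {
        static int autoexit_flag, volume_val;
        const struct demo_opt tab[] = {
            { "autoexit", DEMO_BOOL, &autoexit_flag, NULL },
            { "volume",   DEMO_INT,  &volume_val,    NULL },
            { "f",        DEMO_FUNC, NULL,           demo_set_format },
            { NULL,       DEMO_BOOL, NULL,           NULL },
        };

        demo_apply(tab, "autoexit", NULL);
        demo_apply(tab, "volume", "50");
        demo_apply(tab, "f", "mpegts");
        printf("autoexit=%d volume=%d\n", autoexit_flag, volume_val);
        return 0;
    }
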
3712 
3713 static void show_usage(void)
3714 {
3715  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3716  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3717  av_log(NULL, AV_LOG_INFO, "\n");
3718 }
3719 
3720 void show_help_default(const char *opt, const char *arg)
3721 {
3722  av_log_set_callback(log_callback_help);
3723  show_usage();
3724  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3725  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3726  printf("\n");
3727  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3728  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3729  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3730  printf("\nWhile playing:\n"
3731  "q, ESC quit\n"
3732  "f toggle full screen\n"
3733  "p, SPC pause\n"
3734  "m toggle mute\n"
3735  "9, 0 decrease and increase volume respectively\n"
3736  "/, * decrease and increase volume respectively\n"
3737  "a cycle audio channel in the current program\n"
3738  "v cycle video channel\n"
3739  "t cycle subtitle channel in the current program\n"
3740  "c cycle program\n"
3741  "w cycle video filters or show modes\n"
3742  "s activate frame-step mode\n"
3743  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3744  "down/up seek backward/forward 1 minute\n"
3745  "page down/page up seek backward/forward 10 minutes\n"
3746  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3747  "left double-click toggle full screen\n"
3748  );
3749 }
3750 
3751 /* Called from the main */
3752 int main(int argc, char **argv)
3753 {
3754  int flags, ret;
3755  VideoState *is;
3756 
3757  init_dynload();
3758 
3759  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3760  parse_loglevel(argc, argv, options);
3761 
3762  /* register all codecs, demux and protocols */
3763 #if CONFIG_AVDEVICE
3764  avdevice_register_all();
3765 #endif
3766  avformat_network_init();
3767 
3768  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3769  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3770 
3771  show_banner(argc, argv, options);
3772 
3773  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3774  if (ret < 0)
3775  exit(ret == AVERROR_EXIT ? 0 : 1);
3776 
3777  if (!input_filename) {
3778  show_usage();
3779  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3780  av_log(NULL, AV_LOG_FATAL,
3781  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3782  exit(1);
3783  }
3784 
3785  if (display_disable) {
3786  video_disable = 1;
3787  }
3788  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3789  if (audio_disable)
3790  flags &= ~SDL_INIT_AUDIO;
3791  else {
3792  /* Try to work around an occasional ALSA buffer underflow issue when the
3793  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3794  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3795  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3796  }
3797  if (display_disable)
3798  flags &= ~SDL_INIT_VIDEO;
3799  if (SDL_Init (flags)) {
3800  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3801  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3802  exit(1);
3803  }
3804 
3805  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3806  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3807 
3808  if (!display_disable) {
3809  int flags = SDL_WINDOW_HIDDEN;
3810  if (alwaysontop)
3811 #if SDL_VERSION_ATLEAST(2,0,5)
3812  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3813 #else
3814  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3815 #endif
3816  if (borderless)
3817  flags |= SDL_WINDOW_BORDERLESS;
3818  else
3819  flags |= SDL_WINDOW_RESIZABLE;
3820 
3821 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3822  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3823 #endif
3824  if (hwaccel && !enable_vulkan) {
3825  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3826  enable_vulkan = 1;
3827  }
3828  if (enable_vulkan) {
3829  vk_renderer = vk_get_renderer();
3830  if (vk_renderer) {
3831 #if SDL_VERSION_ATLEAST(2, 0, 6)
3832  flags |= SDL_WINDOW_VULKAN;
3833 #endif
3834  } else {
3835  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3836  enable_vulkan = 0;
3837  }
3838  }
3839  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3840  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3841  if (!window) {
3842  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3843  do_exit(NULL);
3844  }
3845 
3846  if (vk_renderer) {
3847  AVDictionary *dict = NULL;
3848 
3849  if (vulkan_params) {
3850  int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3851  if (ret < 0) {
3852  av_log(NULL, AV_LOG_FATAL, "Failed to parse, %s\n", vulkan_params);
3853  do_exit(NULL);
3854  }
3855  }
3856  ret = vk_renderer_create(vk_renderer, window, dict);
3857  av_dict_free(&dict);
3858  if (ret < 0) {
3859  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3860  do_exit(NULL);
3861  }
3862  } else {
3863  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3864  if (!renderer) {
3865  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3866  renderer = SDL_CreateRenderer(window, -1, 0);
3867  }
3868  if (renderer) {
3869  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3870  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3871  }
3872  if (!renderer || !renderer_info.num_texture_formats) {
3873  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3874  do_exit(NULL);
3875  }
3876  }
3877  }
3878 
3879  is = stream_open(input_filename, file_iformat);
3880  if (!is) {
3881  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3882  do_exit(NULL);
3883  }
3884 
3885  event_loop(is);
3886 
3887  /* never returns */
3888 
3889  return 0;
3890 }
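
Note: the -vulkan_params string accepted by the options table is parsed in main() with av_dict_parse_string(), using '=' between key and value and ':' between pairs, and the resulting AVDictionary is handed to the Vulkan renderer. A minimal standalone sketch of that parse-and-inspect step follows; the key/value pairs in the example string are placeholders, not documented renderer options.

    #include <stdio.h>
    #include <libavutil/dict.h>

    /* Build with, e.g.: cc demo_dict.c $(pkg-config --cflags --libs libavutil) */
    int main(void)
    {
        AVDictionary *dict = NULL;
        const char *vulkan_params = "debug=1:validate=0"; /* example value only */
        const AVDictionaryEntry *e = NULL;

        /* Same separators as used in main() above: '=' between key and value,
         * ':' between pairs. */
        int ret = av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
        if (ret < 0) {
            fprintf(stderr, "Failed to parse '%s'\n", vulkan_params);
            return 1;
        }

        /* Walk every entry of the dictionary. */
        while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("%s = %s\n", e->key, e->value);

        av_dict_free(&dict);
        return 0;
    }
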