FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <inttypes.h>
29 #include <math.h>
30 #include <limits.h>
31 #include <signal.h>
32 #include <stdint.h>
33 
34 #include "libavutil/avstring.h"
35 #include "libavutil/channel_layout.h"
36 #include "libavutil/eval.h"
37 #include "libavutil/mathematics.h"
38 #include "libavutil/pixdesc.h"
39 #include "libavutil/imgutils.h"
40 #include "libavutil/dict.h"
41 #include "libavutil/fifo.h"
42 #include "libavutil/parseutils.h"
43 #include "libavutil/samplefmt.h"
44 #include "libavutil/time.h"
45 #include "libavutil/bprint.h"
46 #include "libavformat/avformat.h"
47 #include "libavdevice/avdevice.h"
48 #include "libswscale/swscale.h"
49 #include "libavutil/opt.h"
50 #include "libavutil/tx.h"
51 #include "libswresample/swresample.h"
52 
53 #include "libavfilter/avfilter.h"
54 #include "libavfilter/buffersink.h"
55 #include "libavfilter/buffersrc.h"
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 #include "ffplay_renderer.h"
62 #include "opt_common.h"
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 25
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 
72 /* Minimum SDL audio buffer size, in samples. */
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
76 
77 /* Step size for volume control in dB */
78 #define SDL_VOLUME_STEP (0.75)
79 
80 /* no AV sync correction is done if below the minimum AV sync threshold */
81 #define AV_SYNC_THRESHOLD_MIN 0.04
82 /* AV sync correction is done if above the maximum AV sync threshold */
83 #define AV_SYNC_THRESHOLD_MAX 0.1
84 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
85 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
86 /* no AV correction is done if too big error */
87 #define AV_NOSYNC_THRESHOLD 10.0
88 
89 /* maximum audio speed change to get correct sync */
90 #define SAMPLE_CORRECTION_PERCENT_MAX 10
91 
92 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
93 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
94 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
95 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
96 
97 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
98 #define AUDIO_DIFF_AVG_NB 20
99 
100 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
101 #define REFRESH_RATE 0.01
102 
103 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
104 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
105 #define SAMPLE_ARRAY_SIZE (8 * 65536)
106 
107 #define CURSOR_HIDE_DELAY 1000000
108 
109 #define USE_ONEPASS_SUBTITLE_RENDER 1
110 
111 typedef struct MyAVPacketList {
112  AVPacket *pkt;
113  int serial;
114 } MyAVPacketList;
115 
116 typedef struct PacketQueue {
117  AVFifo *pkt_list;
118  int nb_packets;
119  int size;
120  int64_t duration;
121  int abort_request;
122  int serial;
123  SDL_mutex *mutex;
124  SDL_cond *cond;
125 } PacketQueue;
126 
127 #define VIDEO_PICTURE_QUEUE_SIZE 3
128 #define SUBPICTURE_QUEUE_SIZE 16
129 #define SAMPLE_QUEUE_SIZE 9
130 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
131 
132 typedef struct AudioParams {
133  int freq;
134  AVChannelLayout ch_layout;
135  enum AVSampleFormat fmt;
136  int frame_size;
137  int bytes_per_sec;
138 } AudioParams;
139 
140 typedef struct Clock {
141  double pts; /* clock base */
142  double pts_drift; /* clock base minus time at which we updated the clock */
143  double last_updated;
144  double speed;
145  int serial; /* clock is based on a packet with this serial */
146  int paused;
147  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
148 } Clock;
149 
150 typedef struct FrameData {
151  int64_t pkt_pos;
152 } FrameData;
153 
154 /* Common struct for handling all types of decoded data and allocated render buffers. */
155 typedef struct Frame {
156  AVFrame *frame;
157  AVSubtitle sub;
158  int serial;
159  double pts; /* presentation timestamp for the frame */
160  double duration; /* estimated duration of the frame */
161  int64_t pos; /* byte position of the frame in the input file */
162  int width;
163  int height;
164  int format;
165  AVRational sar;
166  int uploaded;
167  int flip_v;
168 } Frame;
169 
170 typedef struct FrameQueue {
171  Frame queue[FRAME_QUEUE_SIZE];
172  int rindex;
173  int windex;
174  int size;
175  int max_size;
176  int keep_last;
177  int rindex_shown;
178  SDL_mutex *mutex;
179  SDL_cond *cond;
180  PacketQueue *pktq;
181 } FrameQueue;
182 
183 enum {
184  AV_SYNC_AUDIO_MASTER, /* default choice */
185  AV_SYNC_VIDEO_MASTER,
186  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
190  AVPacket *pkt;
191  PacketQueue *queue;
192  AVCodecContext *avctx;
193  int pkt_serial;
194  int finished;
195  int packet_pending;
196  SDL_cond *empty_queue_cond;
197  int64_t start_pts;
198  AVRational start_pts_tb;
199  int64_t next_pts;
200  AVRational next_pts_tb;
201  SDL_Thread *decoder_tid;
202 } Decoder;
203 
204 typedef struct VideoState {
205  SDL_Thread *read_tid;
209  int paused;
212  int seek_req;
214  int64_t seek_pos;
215  int64_t seek_rel;
218  int realtime;
219 
223 
227 
231 
233 
235 
236  double audio_clock;
238  double audio_diff_cum; /* used for AV difference average computation */
245  uint8_t *audio_buf;
246  uint8_t *audio_buf1;
247  unsigned int audio_buf_size; /* in bytes */
248  unsigned int audio_buf1_size;
249  int audio_buf_index; /* in bytes */
252  int muted;
259 
260  enum ShowMode {
261   SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
262  } show_mode;
269  float *real_data;
271  int xpos;
273  SDL_Texture *vis_texture;
274  SDL_Texture *sub_texture;
275  SDL_Texture *vid_texture;
276 
280 
281  double frame_timer;
287  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
296  AVFilterContext *in_video_filter; // the first filter in the video chain
297  AVFilterContext *out_video_filter; // the last filter in the video chain
298  AVFilterContext *in_audio_filter; // the first filter in the audio chain
299  AVFilterContext *out_audio_filter; // the last filter in the audio chain
300  AVFilterGraph *agraph; // audio filter graph
301 
303 
305 } VideoState;
306 
307 /* options specified by the user */
308 static const AVInputFormat *file_iformat;
309 static const char *input_filename;
310 static const char *window_title;
311 static int default_width = 640;
312 static int default_height = 480;
313 static int screen_width = 0;
314 static int screen_height = 0;
315 static int screen_left = SDL_WINDOWPOS_CENTERED;
316 static int screen_top = SDL_WINDOWPOS_CENTERED;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static float seek_interval = 10;
323 static int display_disable;
324 static int borderless;
325 static int alwaysontop;
326 static int startup_volume = 100;
327 static int show_status = -1;
328 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
329 static int64_t start_time = AV_NOPTS_VALUE;
330 static int64_t duration = AV_NOPTS_VALUE;
331 static int fast = 0;
332 static int genpts = 0;
333 static int lowres = 0;
334 static int decoder_reorder_pts = -1;
335 static int autoexit;
336 static int exit_on_keydown;
337 static int exit_on_mousedown;
338 static int loop = 1;
339 static int framedrop = -1;
340 static int infinite_buffer = -1;
341 static enum ShowMode show_mode = SHOW_MODE_NONE;
342 static const char *audio_codec_name;
343 static const char *subtitle_codec_name;
344 static const char *video_codec_name;
345 double rdftspeed = 0.02;
346 static int64_t cursor_last_shown;
347 static int cursor_hidden = 0;
348 static const char **vfilters_list = NULL;
349 static int nb_vfilters = 0;
350 static char *afilters = NULL;
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 static int filter_nbthreads = 0;
354 static int enable_vulkan = 0;
355 static char *vulkan_params = NULL;
356 static const char *hwaccel = NULL;
357 
358 /* current context */
359 static int is_full_screen;
360 static int64_t audio_callback_time;
361 
362 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
363 
364 static SDL_Window *window;
365 static SDL_Renderer *renderer;
366 static SDL_RendererInfo renderer_info = {0};
367 static SDL_AudioDeviceID audio_dev;
368 
369 static VkRenderer *vk_renderer;
370 
371 static const struct TextureFormatEntry {
372  enum AVPixelFormat format;
373  int texture_fmt;
374 } sdl_texture_format_map[] = {
375  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
376  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
377  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
378  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
379  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
380  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
381  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
382  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
383  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
384  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
385  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
386  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
387  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
388  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
389  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
390  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
391  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
392  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
393  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
394  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
395 };
396 
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
399  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
400  if (ret < 0)
401  return ret;
402 
403  vfilters_list[nb_vfilters - 1] = av_strdup(arg);
404  if (!vfilters_list[nb_vfilters - 1])
405  return AVERROR(ENOMEM);
406 
407  return 0;
408 }
409 
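/* For mono audio, planar and packed layouts are byte-identical, so only the
 * packed equivalents of the two sample formats need to be compared; for any
 * other channel count, a change in channel count or sample format counts as
 * a format change (and forces the audio filter graph to be reconfigured). */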
410 static inline
411 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
412  enum AVSampleFormat fmt2, int64_t channel_count2)
413 {
414  /* If channel count == 1, planar and non-planar formats are the same */
415  if (channel_count1 == 1 && channel_count2 == 1)
416  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
417  else
418  return channel_count1 != channel_count2 || fmt1 != fmt2;
419 }
420 
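/* Note on serials: every packet is stamped with the queue's serial when it is
 * queued, and packet_queue_flush()/packet_queue_start() increment the serial.
 * Consumers compare a packet's or frame's serial against the current queue
 * serial and drop anything stale, which is how data queued before a seek is
 * discarded without tearing down the pipeline. */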
421 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
422 {
423  MyAVPacketList pkt1;
424  int ret;
425 
426  if (q->abort_request)
427  return -1;
428 
429 
430  pkt1.pkt = pkt;
431  pkt1.serial = q->serial;
432 
433  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
434  if (ret < 0)
435  return ret;
436  q->nb_packets++;
437  q->size += pkt1.pkt->size + sizeof(pkt1);
438  q->duration += pkt1.pkt->duration;
439  /* XXX: should duplicate packet data in DV case */
440  SDL_CondSignal(q->cond);
441  return 0;
442 }
443 
444 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
445 {
446  AVPacket *pkt1;
447  int ret;
448 
449  pkt1 = av_packet_alloc();
450  if (!pkt1) {
451  av_packet_unref(pkt);
452  return -1;
453  }
454  av_packet_move_ref(pkt1, pkt);
455 
456  SDL_LockMutex(q->mutex);
457  ret = packet_queue_put_private(q, pkt1);
458  SDL_UnlockMutex(q->mutex);
459 
460  if (ret < 0)
461  av_packet_free(&pkt1);
462 
463  return ret;
464 }
465 
466 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
467 {
468  pkt->stream_index = stream_index;
469  return packet_queue_put(q, pkt);
470 }
471 
472 /* packet queue handling */
473 static int packet_queue_init(PacketQueue *q)
474 {
475  memset(q, 0, sizeof(PacketQueue));
476  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
477  if (!q->pkt_list)
478  return AVERROR(ENOMEM);
479  q->mutex = SDL_CreateMutex();
480  if (!q->mutex) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->cond = SDL_CreateCond();
485  if (!q->cond) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->abort_request = 1;
490  return 0;
491 }
492 
493 static void packet_queue_flush(PacketQueue *q)
494 {
495  MyAVPacketList pkt1;
496 
497  SDL_LockMutex(q->mutex);
498  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
499  av_packet_free(&pkt1.pkt);
500  q->nb_packets = 0;
501  q->size = 0;
502  q->duration = 0;
503  q->serial++;
504  SDL_UnlockMutex(q->mutex);
505 }
506 
507 static void packet_queue_destroy(PacketQueue *q)
508 {
509  packet_queue_flush(q);
510  av_fifo_freep2(&q->pkt_list);
511  SDL_DestroyMutex(q->mutex);
512  SDL_DestroyCond(q->cond);
513 }
514 
515 static void packet_queue_abort(PacketQueue *q)
516 {
517  SDL_LockMutex(q->mutex);
518 
519  q->abort_request = 1;
520 
521  SDL_CondSignal(q->cond);
522 
523  SDL_UnlockMutex(q->mutex);
524 }
525 
526 static void packet_queue_start(PacketQueue *q)
527 {
528  SDL_LockMutex(q->mutex);
529  q->abort_request = 0;
530  q->serial++;
531  SDL_UnlockMutex(q->mutex);
532 }
533 
534 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
535 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
536 {
537  MyAVPacketList pkt1;
538  int ret;
539 
540  SDL_LockMutex(q->mutex);
541 
542  for (;;) {
543  if (q->abort_request) {
544  ret = -1;
545  break;
546  }
547 
548  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
549  q->nb_packets--;
550  q->size -= pkt1.pkt->size + sizeof(pkt1);
551  q->duration -= pkt1.pkt->duration;
552  av_packet_move_ref(pkt, pkt1.pkt);
553  if (serial)
554  *serial = pkt1.serial;
555  av_packet_free(&pkt1.pkt);
556  ret = 1;
557  break;
558  } else if (!block) {
559  ret = 0;
560  break;
561  } else {
562  SDL_CondWait(q->cond, q->mutex);
563  }
564  }
565  SDL_UnlockMutex(q->mutex);
566  return ret;
567 }
568 
569 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
570  memset(d, 0, sizeof(Decoder));
571  d->pkt = av_packet_alloc();
572  if (!d->pkt)
573  return AVERROR(ENOMEM);
574  d->avctx = avctx;
575  d->queue = queue;
576  d->empty_queue_cond = empty_queue_cond;
577  d->start_pts = AV_NOPTS_VALUE;
578  d->pkt_serial = -1;
579  return 0;
580 }
581 
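/* decoder_decode_frame() drains the codec with avcodec_receive_frame() first;
 * only when the decoder needs more input does it pull the next packet from the
 * queue. A serial change (i.e. a seek happened) flushes the codec and resets
 * the predicted audio pts. Subtitles are decoded with
 * avcodec_decode_subtitle2() instead of the send/receive API. */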
582 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
583  int ret = AVERROR(EAGAIN);
584 
585  for (;;) {
586  if (d->queue->serial == d->pkt_serial) {
587  do {
588  if (d->queue->abort_request)
589  return -1;
590 
591  switch (d->avctx->codec_type) {
592  case AVMEDIA_TYPE_VIDEO:
593  ret = avcodec_receive_frame(d->avctx, frame);
594  if (ret >= 0) {
595  if (decoder_reorder_pts == -1) {
596  frame->pts = frame->best_effort_timestamp;
597  } else if (!decoder_reorder_pts) {
598  frame->pts = frame->pkt_dts;
599  }
600  }
601  break;
602  case AVMEDIA_TYPE_AUDIO:
603  ret = avcodec_receive_frame(d->avctx, frame);
604  if (ret >= 0) {
605  AVRational tb = (AVRational){1, frame->sample_rate};
606  if (frame->pts != AV_NOPTS_VALUE)
607  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
608  else if (d->next_pts != AV_NOPTS_VALUE)
609  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
610  if (frame->pts != AV_NOPTS_VALUE) {
611  d->next_pts = frame->pts + frame->nb_samples;
612  d->next_pts_tb = tb;
613  }
614  }
615  break;
616  }
617  if (ret == AVERROR_EOF) {
618  d->finished = d->pkt_serial;
619  avcodec_flush_buffers(d->avctx);
620  return 0;
621  }
622  if (ret >= 0)
623  return 1;
624  } while (ret != AVERROR(EAGAIN));
625  }
626 
627  do {
628  if (d->queue->nb_packets == 0)
629  SDL_CondSignal(d->empty_queue_cond);
630  if (d->packet_pending) {
631  d->packet_pending = 0;
632  } else {
633  int old_serial = d->pkt_serial;
634  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
635  return -1;
636  if (old_serial != d->pkt_serial) {
637  avcodec_flush_buffers(d->avctx);
638  d->finished = 0;
639  d->next_pts = d->start_pts;
640  d->next_pts_tb = d->start_pts_tb;
641  }
642  }
643  if (d->queue->serial == d->pkt_serial)
644  break;
645  av_packet_unref(d->pkt);
646  } while (1);
647 
648  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
649  int got_frame = 0;
650  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
651  if (ret < 0) {
652  ret = AVERROR(EAGAIN);
653  } else {
654  if (got_frame && !d->pkt->data) {
655  d->packet_pending = 1;
656  }
657  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
658  }
659  av_packet_unref(d->pkt);
660  } else {
661  if (d->pkt->buf && !d->pkt->opaque_ref) {
662  FrameData *fd;
663 
664  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
665  if (!d->pkt->opaque_ref)
666  return AVERROR(ENOMEM);
667  fd = (FrameData*)d->pkt->opaque_ref->data;
668  fd->pkt_pos = d->pkt->pos;
669  }
670 
671  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
672  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
673  d->packet_pending = 1;
674  } else {
675  av_packet_unref(d->pkt);
676  }
677  }
678  }
679 }
680 
681 static void decoder_destroy(Decoder *d) {
682  av_packet_free(&d->pkt);
683  avcodec_free_context(&d->avctx);
684 }
685 
686 static void frame_queue_unref_item(Frame *vp)
687 {
688  av_frame_unref(vp->frame);
689  avsubtitle_free(&vp->sub);
690 }
691 
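/* The frame queue is a fixed-size ring buffer protected by a mutex/condition
 * pair. When keep_last is set, the most recently displayed frame stays in the
 * queue (rindex_shown == 1) so it can be redrawn at any time;
 * frame_queue_nb_remaining() subtracts it from the count of frames still
 * waiting to be shown. */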
692 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
693 {
694  int i;
695  memset(f, 0, sizeof(FrameQueue));
696  if (!(f->mutex = SDL_CreateMutex())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  if (!(f->cond = SDL_CreateCond())) {
701  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
702  return AVERROR(ENOMEM);
703  }
704  f->pktq = pktq;
705  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
706  f->keep_last = !!keep_last;
707  for (i = 0; i < f->max_size; i++)
708  if (!(f->queue[i].frame = av_frame_alloc()))
709  return AVERROR(ENOMEM);
710  return 0;
711 }
712 
713 static void frame_queue_destroy(FrameQueue *f)
714 {
715  int i;
716  for (i = 0; i < f->max_size; i++) {
717  Frame *vp = &f->queue[i];
718  frame_queue_unref_item(vp);
719  av_frame_free(&vp->frame);
720  }
721  SDL_DestroyMutex(f->mutex);
722  SDL_DestroyCond(f->cond);
723 }
724 
725 static void frame_queue_signal(FrameQueue *f)
726 {
727  SDL_LockMutex(f->mutex);
728  SDL_CondSignal(f->cond);
729  SDL_UnlockMutex(f->mutex);
730 }
731 
732 static Frame *frame_queue_peek(FrameQueue *f)
733 {
734  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
735 }
736 
737 static Frame *frame_queue_peek_next(FrameQueue *f)
738 {
739  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
740 }
741 
742 static Frame *frame_queue_peek_last(FrameQueue *f)
743 {
744  return &f->queue[f->rindex];
745 }
746 
747 static Frame *frame_queue_peek_writable(FrameQueue *f)
748 {
749  /* wait until we have space to put a new frame */
750  SDL_LockMutex(f->mutex);
751  while (f->size >= f->max_size &&
752  !f->pktq->abort_request) {
753  SDL_CondWait(f->cond, f->mutex);
754  }
755  SDL_UnlockMutex(f->mutex);
756 
757  if (f->pktq->abort_request)
758  return NULL;
759 
760  return &f->queue[f->windex];
761 }
762 
763 static Frame *frame_queue_peek_readable(FrameQueue *f)
764 {
765  /* wait until we have a readable new frame */
766  SDL_LockMutex(f->mutex);
767  while (f->size - f->rindex_shown <= 0 &&
768  !f->pktq->abort_request) {
769  SDL_CondWait(f->cond, f->mutex);
770  }
771  SDL_UnlockMutex(f->mutex);
772 
773  if (f->pktq->abort_request)
774  return NULL;
775 
776  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
777 }
778 
779 static void frame_queue_push(FrameQueue *f)
780 {
781  if (++f->windex == f->max_size)
782  f->windex = 0;
783  SDL_LockMutex(f->mutex);
784  f->size++;
785  SDL_CondSignal(f->cond);
786  SDL_UnlockMutex(f->mutex);
787 }
788 
789 static void frame_queue_next(FrameQueue *f)
790 {
791  if (f->keep_last && !f->rindex_shown) {
792  f->rindex_shown = 1;
793  return;
794  }
795  frame_queue_unref_item(&f->queue[f->rindex]);
796  if (++f->rindex == f->max_size)
797  f->rindex = 0;
798  SDL_LockMutex(f->mutex);
799  f->size--;
800  SDL_CondSignal(f->cond);
801  SDL_UnlockMutex(f->mutex);
802 }
803 
804 /* return the number of undisplayed frames in the queue */
805 static int frame_queue_nb_remaining(FrameQueue *f)
806 {
807  return f->size - f->rindex_shown;
808 }
809 
810 /* return last shown position */
811 static int64_t frame_queue_last_pos(FrameQueue *f)
812 {
813  Frame *fp = &f->queue[f->rindex];
814  if (f->rindex_shown && fp->serial == f->pktq->serial)
815  return fp->pos;
816  else
817  return -1;
818 }
819 
820 static void decoder_abort(Decoder *d, FrameQueue *fq)
821 {
822  packet_queue_abort(d->queue);
823  frame_queue_signal(fq);
824  SDL_WaitThread(d->decoder_tid, NULL);
825  d->decoder_tid = NULL;
826  packet_queue_flush(d->queue);
827 }
828 
829 static inline void fill_rectangle(int x, int y, int w, int h)
830 {
831  SDL_Rect rect;
832  rect.x = x;
833  rect.y = y;
834  rect.w = w;
835  rect.h = h;
836  if (w && h)
837  SDL_RenderFillRect(renderer, &rect);
838 }
839 
840 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
841 {
842  Uint32 format;
843  int access, w, h;
844  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
845  void *pixels;
846  int pitch;
847  if (*texture)
848  SDL_DestroyTexture(*texture);
849  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
850  return -1;
851  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
852  return -1;
853  if (init_texture) {
854  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
855  return -1;
856  memset(pixels, 0, pitch * new_height);
857  SDL_UnlockTexture(*texture);
858  }
859  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
860  }
861  return 0;
862 }
863 
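/* calculate_display_rect() fits the picture into the target area while
 * preserving the sample aspect ratio (an invalid or unknown SAR is treated as
 * 1:1); width and height are rounded down to even values and the rectangle is
 * centred inside the screen area. */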
864 static void calculate_display_rect(SDL_Rect *rect,
865  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
866  int pic_width, int pic_height, AVRational pic_sar)
867 {
868  AVRational aspect_ratio = pic_sar;
869  int64_t width, height, x, y;
870 
871  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
872  aspect_ratio = av_make_q(1, 1);
873 
874  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
875 
876  /* XXX: we suppose the screen has a 1.0 pixel ratio */
877  height = scr_height;
878  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
879  if (width > scr_width) {
880  width = scr_width;
881  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
882  }
883  x = (scr_width - width) / 2;
884  y = (scr_height - height) / 2;
885  rect->x = scr_xleft + x;
886  rect->y = scr_ytop + y;
887  rect->w = FFMAX((int)width, 1);
888  rect->h = FFMAX((int)height, 1);
889 }
890 
891 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
892 {
893  int i;
894  *sdl_blendmode = SDL_BLENDMODE_NONE;
895  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
896  if (format == AV_PIX_FMT_RGB32 ||
900  *sdl_blendmode = SDL_BLENDMODE_BLEND;
901  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
903  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
904  return;
905  }
906  }
907 }
908 
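/* upload_texture() copies the decoded frame into the SDL texture. Planar YUV
 * (IYUV) frames use SDL_UpdateYUVTexture(); frames with negative linesizes
 * (stored bottom-up) are handled by starting from the last line and negating
 * the stride, and mixed signs are rejected. */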
909 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
910 {
911  int ret = 0;
912  Uint32 sdl_pix_fmt;
913  SDL_BlendMode sdl_blendmode;
914  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
915  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
916  return -1;
917  switch (sdl_pix_fmt) {
918  case SDL_PIXELFORMAT_IYUV:
919  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
920  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
921  frame->data[1], frame->linesize[1],
922  frame->data[2], frame->linesize[2]);
923  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
924  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
925  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
926  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
927  } else {
928  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
929  return -1;
930  }
931  break;
932  default:
933  if (frame->linesize[0] < 0) {
934  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
935  } else {
936  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
937  }
938  break;
939  }
940  return ret;
941 }
942 
948 };
949 
950 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
951 {
952 #if SDL_VERSION_ATLEAST(2,0,8)
953  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
954  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
955  if (frame->color_range == AVCOL_RANGE_JPEG)
956  mode = SDL_YUV_CONVERSION_JPEG;
957  else if (frame->colorspace == AVCOL_SPC_BT709)
958  mode = SDL_YUV_CONVERSION_BT709;
959  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
960  mode = SDL_YUV_CONVERSION_BT601;
961  }
962  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
963 #endif
964 }
965 
966 static void video_image_display(VideoState *is)
967 {
968  Frame *vp;
969  Frame *sp = NULL;
970  SDL_Rect rect;
971 
972  vp = frame_queue_peek_last(&is->pictq);
973  if (vk_renderer) {
975  return;
976  }
977 
978  if (is->subtitle_st) {
979  if (frame_queue_nb_remaining(&is->subpq) > 0) {
980  sp = frame_queue_peek(&is->subpq);
981 
982  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
983  if (!sp->uploaded) {
984  uint8_t* pixels[4];
985  int pitch[4];
986  int i;
987  if (!sp->width || !sp->height) {
988  sp->width = vp->width;
989  sp->height = vp->height;
990  }
991  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
992  return;
993 
994  for (i = 0; i < sp->sub.num_rects; i++) {
995  AVSubtitleRect *sub_rect = sp->sub.rects[i];
996 
997  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
998  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
999  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1000  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1001 
1002  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1003  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1004  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1005  0, NULL, NULL, NULL);
1006  if (!is->sub_convert_ctx) {
1007  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1008  return;
1009  }
1010  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1011  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1012  0, sub_rect->h, pixels, pitch);
1013  SDL_UnlockTexture(is->sub_texture);
1014  }
1015  }
1016  sp->uploaded = 1;
1017  }
1018  } else
1019  sp = NULL;
1020  }
1021  }
1022 
1023  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1025 
1026  if (!vp->uploaded) {
1027  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1029  return;
1030  }
1031  vp->uploaded = 1;
1032  vp->flip_v = vp->frame->linesize[0] < 0;
1033  }
1034 
1035  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1037  if (sp) {
1038 #if USE_ONEPASS_SUBTITLE_RENDER
1039  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1040 #else
1041  int i;
1042  double xratio = (double)rect.w / (double)sp->width;
1043  double yratio = (double)rect.h / (double)sp->height;
1044  for (i = 0; i < sp->sub.num_rects; i++) {
1045  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1046  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1047  .y = rect.y + sub_rect->y * yratio,
1048  .w = sub_rect->w * xratio,
1049  .h = sub_rect->h * yratio};
1050  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1051  }
1052 #endif
1053  }
1054 }
1055 
1056 static inline int compute_mod(int a, int b)
1057 {
1058  return a < 0 ? a%b + b : a%b;
1059 }
1060 
1061 static void video_audio_display(VideoState *s)
1062 {
1063  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1064  int ch, channels, h, h2;
1065  int64_t time_diff;
1066  int rdft_bits, nb_freq;
1067 
1068  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1069  ;
1070  nb_freq = 1 << (rdft_bits - 1);
1071 
1072  /* compute display index : center on currently output samples */
1073  channels = s->audio_tgt.ch_layout.nb_channels;
1074  nb_display_channels = channels;
1075  if (!s->paused) {
1076  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1077  n = 2 * channels;
1078  delay = s->audio_write_buf_size;
1079  delay /= n;
1080 
1081  /* to be more precise, we take into account the time spent since
1082  the last buffer computation */
1083  if (audio_callback_time) {
1084  time_diff = av_gettime_relative() - audio_callback_time;
1085  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1086  }
1087 
1088  delay += 2 * data_used;
1089  if (delay < data_used)
1090  delay = data_used;
1091 
1092  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1093  if (s->show_mode == SHOW_MODE_WAVES) {
1094  h = INT_MIN;
1095  for (i = 0; i < 1000; i += channels) {
1096  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1097  int a = s->sample_array[idx];
1098  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1099  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1100  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1101  int score = a - d;
1102  if (h < score && (b ^ c) < 0) {
1103  h = score;
1104  i_start = idx;
1105  }
1106  }
1107  }
1108 
1109  s->last_i_start = i_start;
1110  } else {
1111  i_start = s->last_i_start;
1112  }
1113 
1114  if (s->show_mode == SHOW_MODE_WAVES) {
1115  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1116 
1117  /* total height for one channel */
1118  h = s->height / nb_display_channels;
1119  /* graph height / 2 */
1120  h2 = (h * 9) / 20;
1121  for (ch = 0; ch < nb_display_channels; ch++) {
1122  i = i_start + ch;
1123  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1124  for (x = 0; x < s->width; x++) {
1125  y = (s->sample_array[i] * h2) >> 15;
1126  if (y < 0) {
1127  y = -y;
1128  ys = y1 - y;
1129  } else {
1130  ys = y1;
1131  }
1132  fill_rectangle(s->xleft + x, ys, 1, y);
1133  i += channels;
1134  if (i >= SAMPLE_ARRAY_SIZE)
1135  i -= SAMPLE_ARRAY_SIZE;
1136  }
1137  }
1138 
1139  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1140 
1141  for (ch = 1; ch < nb_display_channels; ch++) {
1142  y = s->ytop + ch * h;
1143  fill_rectangle(s->xleft, y, s->width, 1);
1144  }
1145  } else {
1146  int err = 0;
1147  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1148  return;
1149 
1150  if (s->xpos >= s->width)
1151  s->xpos = 0;
1152  nb_display_channels= FFMIN(nb_display_channels, 2);
1153  if (rdft_bits != s->rdft_bits) {
1154  const float rdft_scale = 1.0;
1155  av_tx_uninit(&s->rdft);
1156  av_freep(&s->real_data);
1157  av_freep(&s->rdft_data);
1158  s->rdft_bits = rdft_bits;
1159  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1160  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1161  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1162  0, 1 << rdft_bits, &rdft_scale, 0);
1163  }
1164  if (err < 0 || !s->rdft_data) {
1165  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1166  s->show_mode = SHOW_MODE_WAVES;
1167  } else {
1168  float *data_in[2];
1169  AVComplexFloat *data[2];
1170  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1171  uint32_t *pixels;
1172  int pitch;
1173  for (ch = 0; ch < nb_display_channels; ch++) {
1174  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1175  data[ch] = s->rdft_data + nb_freq * ch;
1176  i = i_start + ch;
1177  for (x = 0; x < 2 * nb_freq; x++) {
1178  double w = (x-nb_freq) * (1.0 / nb_freq);
1179  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1180  i += channels;
1181  if (i >= SAMPLE_ARRAY_SIZE)
1182  i -= SAMPLE_ARRAY_SIZE;
1183  }
1184  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1185  data[ch][0].im = data[ch][nb_freq].re;
1186  data[ch][nb_freq].re = 0;
1187  }
1188  /* Least efficient way to do this, we should of course
1189  * directly access it but it is more than fast enough. */
1190  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1191  pitch >>= 2;
1192  pixels += pitch * s->height;
1193  for (y = 0; y < s->height; y++) {
1194  double w = 1 / sqrt(nb_freq);
1195  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1196  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1197  : a;
1198  a = FFMIN(a, 255);
1199  b = FFMIN(b, 255);
1200  pixels -= pitch;
1201  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1202  }
1203  SDL_UnlockTexture(s->vis_texture);
1204  }
1205  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1206  }
1207  if (!s->paused)
1208  s->xpos++;
1209  }
1210 }
1211 
1212 static void stream_component_close(VideoState *is, int stream_index)
1213 {
1214  AVFormatContext *ic = is->ic;
1215  AVCodecParameters *codecpar;
1216 
1217  if (stream_index < 0 || stream_index >= ic->nb_streams)
1218  return;
1219  codecpar = ic->streams[stream_index]->codecpar;
1220 
1221  switch (codecpar->codec_type) {
1222  case AVMEDIA_TYPE_AUDIO:
1223  decoder_abort(&is->auddec, &is->sampq);
1224  SDL_CloseAudioDevice(audio_dev);
1225  decoder_destroy(&is->auddec);
1226  swr_free(&is->swr_ctx);
1227  av_freep(&is->audio_buf1);
1228  is->audio_buf1_size = 0;
1229  is->audio_buf = NULL;
1230 
1231  if (is->rdft) {
1232  av_tx_uninit(&is->rdft);
1233  av_freep(&is->real_data);
1234  av_freep(&is->rdft_data);
1235  is->rdft = NULL;
1236  is->rdft_bits = 0;
1237  }
1238  break;
1239  case AVMEDIA_TYPE_VIDEO:
1240  decoder_abort(&is->viddec, &is->pictq);
1241  decoder_destroy(&is->viddec);
1242  break;
1243  case AVMEDIA_TYPE_SUBTITLE:
1244  decoder_abort(&is->subdec, &is->subpq);
1245  decoder_destroy(&is->subdec);
1246  break;
1247  default:
1248  break;
1249  }
1250 
1251  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1252  switch (codecpar->codec_type) {
1253  case AVMEDIA_TYPE_AUDIO:
1254  is->audio_st = NULL;
1255  is->audio_stream = -1;
1256  break;
1257  case AVMEDIA_TYPE_VIDEO:
1258  is->video_st = NULL;
1259  is->video_stream = -1;
1260  break;
1261  case AVMEDIA_TYPE_SUBTITLE:
1262  is->subtitle_st = NULL;
1263  is->subtitle_stream = -1;
1264  break;
1265  default:
1266  break;
1267  }
1268 }
1269 
1270 static void stream_close(VideoState *is)
1271 {
1272  /* XXX: use a special url_shutdown call to abort parse cleanly */
1273  is->abort_request = 1;
1274  SDL_WaitThread(is->read_tid, NULL);
1275 
1276  /* close each stream */
1277  if (is->audio_stream >= 0)
1278  stream_component_close(is, is->audio_stream);
1279  if (is->video_stream >= 0)
1280  stream_component_close(is, is->video_stream);
1281  if (is->subtitle_stream >= 0)
1282  stream_component_close(is, is->subtitle_stream);
1283 
1284  avformat_close_input(&is->ic);
1285 
1286  packet_queue_destroy(&is->videoq);
1287  packet_queue_destroy(&is->audioq);
1288  packet_queue_destroy(&is->subtitleq);
1289 
1290  /* free all pictures */
1291  frame_queue_destroy(&is->pictq);
1292  frame_queue_destroy(&is->sampq);
1293  frame_queue_destroy(&is->subpq);
1294  SDL_DestroyCond(is->continue_read_thread);
1295  sws_freeContext(is->sub_convert_ctx);
1296  av_free(is->filename);
1297  if (is->vis_texture)
1298  SDL_DestroyTexture(is->vis_texture);
1299  if (is->vid_texture)
1300  SDL_DestroyTexture(is->vid_texture);
1301  if (is->sub_texture)
1302  SDL_DestroyTexture(is->sub_texture);
1303  av_free(is);
1304 }
1305 
1306 static void do_exit(VideoState *is)
1307 {
1308  if (is) {
1309  stream_close(is);
1310  }
1311  if (renderer)
1312  SDL_DestroyRenderer(renderer);
1313  if (vk_renderer)
1315  if (window)
1316  SDL_DestroyWindow(window);
1317  uninit_opts();
1318  for (int i = 0; i < nb_vfilters; i++)
1326  if (show_status)
1327  printf("\n");
1328  SDL_Quit();
1329  av_log(NULL, AV_LOG_QUIET, "%s", "");
1330  exit(0);
1331 }
1332 
1333 static void sigterm_handler(int sig)
1334 {
1335  exit(123);
1336 }
1337 
1338 static void set_default_window_size(int width, int height, AVRational sar)
1339 {
1340  SDL_Rect rect;
1341  int max_width = screen_width ? screen_width : INT_MAX;
1342  int max_height = screen_height ? screen_height : INT_MAX;
1343  if (max_width == INT_MAX && max_height == INT_MAX)
1344  max_height = height;
1345  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1346  default_width = rect.w;
1347  default_height = rect.h;
1348 }
1349 
1350 static int video_open(VideoState *is)
1351 {
1352  int w,h;
1353 
1356 
1357  if (!window_title)
1359  SDL_SetWindowTitle(window, window_title);
1360 
1361  SDL_SetWindowSize(window, w, h);
1362  SDL_SetWindowPosition(window, screen_left, screen_top);
1363  if (is_full_screen)
1364  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1365  SDL_ShowWindow(window);
1366 
1367  is->width = w;
1368  is->height = h;
1369 
1370  return 0;
1371 }
1372 
1373 /* display the current picture, if any */
1374 static void video_display(VideoState *is)
1375 {
1376  if (!is->width)
1377  video_open(is);
1378 
1379  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1380  SDL_RenderClear(renderer);
1381  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1382  video_audio_display(is);
1383  else if (is->video_st)
1384  video_image_display(is);
1385  SDL_RenderPresent(renderer);
1386 }
1387 
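/* get_clock() extrapolates the clock from the last set point: pts_drift is
 * "pts minus the wall-clock time of the last update", so pts_drift + now gives
 * the current position, with an extra correction applied when the clock runs
 * at a speed other than 1.0. A serial mismatch marks the clock as obsolete. */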
1388 static double get_clock(Clock *c)
1389 {
1390  if (*c->queue_serial != c->serial)
1391  return NAN;
1392  if (c->paused) {
1393  return c->pts;
1394  } else {
1395  double time = av_gettime_relative() / 1000000.0;
1396  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1397  }
1398 }
1399 
1400 static void set_clock_at(Clock *c, double pts, int serial, double time)
1401 {
1402  c->pts = pts;
1403  c->last_updated = time;
1404  c->pts_drift = c->pts - time;
1405  c->serial = serial;
1406 }
1407 
1408 static void set_clock(Clock *c, double pts, int serial)
1409 {
1410  double time = av_gettime_relative() / 1000000.0;
1411  set_clock_at(c, pts, serial, time);
1412 }
1413 
1414 static void set_clock_speed(Clock *c, double speed)
1415 {
1416  set_clock(c, get_clock(c), c->serial);
1417  c->speed = speed;
1418 }
1419 
1420 static void init_clock(Clock *c, int *queue_serial)
1421 {
1422  c->speed = 1.0;
1423  c->paused = 0;
1424  c->queue_serial = queue_serial;
1425  set_clock(c, NAN, -1);
1426 }
1427 
1428 static void sync_clock_to_slave(Clock *c, Clock *slave)
1429 {
1430  double clock = get_clock(c);
1431  double slave_clock = get_clock(slave);
1432  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1433  set_clock(c, slave_clock, slave->serial);
1434 }
1435 
1436 static int get_master_sync_type(VideoState *is) {
1437  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1438  if (is->video_st)
1439  return AV_SYNC_VIDEO_MASTER;
1440  else
1441  return AV_SYNC_AUDIO_MASTER;
1442  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1443  if (is->audio_st)
1444  return AV_SYNC_AUDIO_MASTER;
1445  else
1446  return AV_SYNC_EXTERNAL_CLOCK;
1447  } else {
1448  return AV_SYNC_EXTERNAL_CLOCK;
1449  }
1450 }
1451 
1452 /* get the current master clock value */
1453 static double get_master_clock(VideoState *is)
1454 {
1455  double val;
1456 
1457  switch (get_master_sync_type(is)) {
1458  case AV_SYNC_VIDEO_MASTER:
1459  val = get_clock(&is->vidclk);
1460  break;
1461  case AV_SYNC_AUDIO_MASTER:
1462  val = get_clock(&is->audclk);
1463  break;
1464  default:
1465  val = get_clock(&is->extclk);
1466  break;
1467  }
1468  return val;
1469 }
1470 
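/* check_external_clock_speed() nudges the external clock for realtime streams:
 * it slows playback when a packet queue is nearly empty, speeds it up when
 * both queues are comfortably full, and otherwise steps the speed back towards
 * 1.0 in EXTERNAL_CLOCK_SPEED_STEP increments. */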
1471 static void check_external_clock_speed(VideoState *is) {
1472  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1473  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1475  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1476  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1478  } else {
1479  double speed = is->extclk.speed;
1480  if (speed != 1.0)
1481  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1482  }
1483 }
1484 
1485 /* seek in the stream */
1486 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1487 {
1488  if (!is->seek_req) {
1489  is->seek_pos = pos;
1490  is->seek_rel = rel;
1491  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1492  if (by_bytes)
1493  is->seek_flags |= AVSEEK_FLAG_BYTE;
1494  is->seek_req = 1;
1495  SDL_CondSignal(is->continue_read_thread);
1496  }
1497 }
1498 
1499 /* pause or resume the video */
1500 static void stream_toggle_pause(VideoState *is)
1501 {
1502  if (is->paused) {
1503  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1504  if (is->read_pause_return != AVERROR(ENOSYS)) {
1505  is->vidclk.paused = 0;
1506  }
1507  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1508  }
1509  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1510  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1511 }
1512 
1513 static void toggle_pause(VideoState *is)
1514 {
1515  stream_toggle_pause(is);
1516  is->step = 0;
1517 }
1518 
1519 static void toggle_mute(VideoState *is)
1520 {
1521  is->muted = !is->muted;
1522 }
1523 
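/* update_volume() works in decibels: the current SDL volume is converted to
 * dB, stepped by sign * step (SDL_VOLUME_STEP dB per key press) and converted
 * back, so each step changes the perceived loudness by a constant amount. */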
1524 static void update_volume(VideoState *is, int sign, double step)
1525 {
1526  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1527  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1528  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1529 }
1530 
1531 static void step_to_next_frame(VideoState *is)
1532 {
1533  /* if the stream is paused unpause it, then step */
1534  if (is->paused)
1535  stream_toggle_pause(is);
1536  is->step = 1;
1537 }
1538 
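/* compute_target_delay() only adjusts the delay when video is not the master
 * clock: if video is behind the master by more than the sync threshold the
 * delay is shortened (possibly to 0, catching up), and if it is ahead the
 * current frame is shown longer (delay + diff for long frames, 2 * delay for
 * short ones). */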
1539 static double compute_target_delay(double delay, VideoState *is)
1540 {
1541  double sync_threshold, diff = 0;
1542 
1543  /* update delay to follow master synchronisation source */
1544  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1545  /* if video is slave, we try to correct big delays by
1546  duplicating or deleting a frame */
1547  diff = get_clock(&is->vidclk) - get_master_clock(is);
1548 
1549  /* skip or repeat frame. We take into account the
1550  delay to compute the threshold. I still don't know
1551  if it is the best guess */
1552  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1553  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1554  if (diff <= -sync_threshold)
1555  delay = FFMAX(0, delay + diff);
1556  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1557  delay = delay + diff;
1558  else if (diff >= sync_threshold)
1559  delay = 2 * delay;
1560  }
1561  }
1562 
1563  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1564  delay, -diff);
1565 
1566  return delay;
1567 }
1568 
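/* vp_duration() estimates how long a frame should stay on screen from the pts
 * difference to the next frame, falling back to the decoder-reported duration
 * when that difference is missing, non-positive or larger than
 * max_frame_duration; frames from different serials get a duration of 0. */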
1569 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1570  if (vp->serial == nextvp->serial) {
1571  double duration = nextvp->pts - vp->pts;
1572  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1573  return vp->duration;
1574  else
1575  return duration;
1576  } else {
1577  return 0.0;
1578  }
1579 }
1580 
1581 static void update_video_pts(VideoState *is, double pts, int serial)
1582 {
1583  /* update current video pts */
1584  set_clock(&is->vidclk, pts, serial);
1585  sync_clock_to_slave(&is->extclk, &is->vidclk);
1586 }
1587 
1588 /* called to display each frame */
1589 static void video_refresh(void *opaque, double *remaining_time)
1590 {
1591  VideoState *is = opaque;
1592  double time;
1593 
1594  Frame *sp, *sp2;
1595 
1596  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1597  check_external_clock_speed(is);
1598 
1599  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1600  time = av_gettime_relative() / 1000000.0;
1601  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1602  video_display(is);
1603  is->last_vis_time = time;
1604  }
1605  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1606  }
1607 
1608  if (is->video_st) {
1609 retry:
1610  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1611  // nothing to do, no picture to display in the queue
1612  } else {
1613  double last_duration, duration, delay;
1614  Frame *vp, *lastvp;
1615 
1616  /* dequeue the picture */
1617  lastvp = frame_queue_peek_last(&is->pictq);
1618  vp = frame_queue_peek(&is->pictq);
1619 
1620  if (vp->serial != is->videoq.serial) {
1621  frame_queue_next(&is->pictq);
1622  goto retry;
1623  }
1624 
1625  if (lastvp->serial != vp->serial)
1626  is->frame_timer = av_gettime_relative() / 1000000.0;
1627 
1628  if (is->paused)
1629  goto display;
1630 
1631  /* compute nominal last_duration */
1632  last_duration = vp_duration(is, lastvp, vp);
1633  delay = compute_target_delay(last_duration, is);
1634 
1635  time= av_gettime_relative()/1000000.0;
1636  if (time < is->frame_timer + delay) {
1637  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1638  goto display;
1639  }
1640 
1641  is->frame_timer += delay;
1642  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1643  is->frame_timer = time;
1644 
1645  SDL_LockMutex(is->pictq.mutex);
1646  if (!isnan(vp->pts))
1647  update_video_pts(is, vp->pts, vp->serial);
1648  SDL_UnlockMutex(is->pictq.mutex);
1649 
1650  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1651  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1652  duration = vp_duration(is, vp, nextvp);
1653  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1654  is->frame_drops_late++;
1655  frame_queue_next(&is->pictq);
1656  goto retry;
1657  }
1658  }
1659 
1660  if (is->subtitle_st) {
1661  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1662  sp = frame_queue_peek(&is->subpq);
1663 
1664  if (frame_queue_nb_remaining(&is->subpq) > 1)
1665  sp2 = frame_queue_peek_next(&is->subpq);
1666  else
1667  sp2 = NULL;
1668 
1669  if (sp->serial != is->subtitleq.serial
1670  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1671  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1672  {
1673  if (sp->uploaded) {
1674  int i;
1675  for (i = 0; i < sp->sub.num_rects; i++) {
1676  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1677  uint8_t *pixels;
1678  int pitch, j;
1679 
1680  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1681  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1682  memset(pixels, 0, sub_rect->w << 2);
1683  SDL_UnlockTexture(is->sub_texture);
1684  }
1685  }
1686  }
1687  frame_queue_next(&is->subpq);
1688  } else {
1689  break;
1690  }
1691  }
1692  }
1693 
1694  frame_queue_next(&is->pictq);
1695  is->force_refresh = 1;
1696 
1697  if (is->step && !is->paused)
1698  stream_toggle_pause(is);
1699  }
1700 display:
1701  /* display picture */
1702  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1703  video_display(is);
1704  }
1705  is->force_refresh = 0;
1706  if (show_status) {
1707  AVBPrint buf;
1708  static int64_t last_time;
1709  int64_t cur_time;
1710  int aqsize, vqsize, sqsize;
1711  double av_diff;
1712 
1713  cur_time = av_gettime_relative();
1714  if (!last_time || (cur_time - last_time) >= 30000) {
1715  aqsize = 0;
1716  vqsize = 0;
1717  sqsize = 0;
1718  if (is->audio_st)
1719  aqsize = is->audioq.size;
1720  if (is->video_st)
1721  vqsize = is->videoq.size;
1722  if (is->subtitle_st)
1723  sqsize = is->subtitleq.size;
1724  av_diff = 0;
1725  if (is->audio_st && is->video_st)
1726  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1727  else if (is->video_st)
1728  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1729  else if (is->audio_st)
1730  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1731 
1732  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1733  av_bprintf(&buf,
1734  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1735  get_master_clock(is),
1736  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1737  av_diff,
1738  is->frame_drops_early + is->frame_drops_late,
1739  aqsize / 1024,
1740  vqsize / 1024,
1741  sqsize);
1742 
1743  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1744  fprintf(stderr, "%s", buf.str);
1745  else
1746  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1747 
1748  fflush(stderr);
1749  av_bprint_finalize(&buf, NULL);
1750 
1751  last_time = cur_time;
1752  }
1753  }
1754 }
1755 
1756 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1757 {
1758  Frame *vp;
1759 
1760 #if defined(DEBUG_SYNC)
1761  printf("frame_type=%c pts=%0.3f\n",
1762  av_get_picture_type_char(src_frame->pict_type), pts);
1763 #endif
1764 
1765  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1766  return -1;
1767 
1768  vp->sar = src_frame->sample_aspect_ratio;
1769  vp->uploaded = 0;
1770 
1771  vp->width = src_frame->width;
1772  vp->height = src_frame->height;
1773  vp->format = src_frame->format;
1774 
1775  vp->pts = pts;
1776  vp->duration = duration;
1777  vp->pos = pos;
1778  vp->serial = serial;
1779 
1780  set_default_window_size(vp->width, vp->height, vp->sar);
1781 
1782  av_frame_move_ref(vp->frame, src_frame);
1783  frame_queue_push(&is->pictq);
1784  return 0;
1785 }
1786 
1787 static int get_video_frame(VideoState *is, AVFrame *frame)
1788 {
1789  int got_picture;
1790 
1791  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1792  return -1;
1793 
1794  if (got_picture) {
1795  double dpts = NAN;
1796 
1797  if (frame->pts != AV_NOPTS_VALUE)
1798  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1799 
1800  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1801 
1802  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1803  if (frame->pts != AV_NOPTS_VALUE) {
1804  double diff = dpts - get_master_clock(is);
1805  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1806  diff - is->frame_last_filter_delay < 0 &&
1807  is->viddec.pkt_serial == is->vidclk.serial &&
1808  is->videoq.nb_packets) {
1809  is->frame_drops_early++;
1810  av_frame_unref(frame);
1811  got_picture = 0;
1812  }
1813  }
1814  }
1815  }
1816 
1817  return got_picture;
1818 }
1819 
1820 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1821  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1822 {
1823  int ret, i;
1824  int nb_filters = graph->nb_filters;
1826 
1827  if (filtergraph) {
1830  if (!outputs || !inputs) {
1831  ret = AVERROR(ENOMEM);
1832  goto fail;
1833  }
1834 
1835  outputs->name = av_strdup("in");
1836  outputs->filter_ctx = source_ctx;
1837  outputs->pad_idx = 0;
1838  outputs->next = NULL;
1839 
1840  inputs->name = av_strdup("out");
1841  inputs->filter_ctx = sink_ctx;
1842  inputs->pad_idx = 0;
1843  inputs->next = NULL;
1844 
1845  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1846  goto fail;
1847  } else {
1848  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1849  goto fail;
1850  }
1851 
1852  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1853  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1854  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1855 
1856  ret = avfilter_graph_config(graph, NULL);
1857 fail:
1860  return ret;
1861 }
1862 
1863 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1864 {
1866  char sws_flags_str[512] = "";
1867  char buffersrc_args[256];
1868  int ret;
1869  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1870  AVCodecParameters *codecpar = is->video_st->codecpar;
1871  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1872  const AVDictionaryEntry *e = NULL;
1873  int nb_pix_fmts = 0;
1874  int i, j;
1876 
1877  if (!par)
1878  return AVERROR(ENOMEM);
1879 
1880  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1881  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1882  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1883  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1884  break;
1885  }
1886  }
1887  }
1888  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1889 
1890  while ((e = av_dict_iterate(sws_dict, e))) {
1891  if (!strcmp(e->key, "sws_flags")) {
1892  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1893  } else
1894  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1895  }
1896  if (strlen(sws_flags_str))
1897  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1898 
1899  graph->scale_sws_opts = av_strdup(sws_flags_str);
1900 
1901  snprintf(buffersrc_args, sizeof(buffersrc_args),
1902  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
1903  "colorspace=%d:range=%d",
1904  frame->width, frame->height, frame->format,
1905  is->video_st->time_base.num, is->video_st->time_base.den,
1906  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
1907  frame->colorspace, frame->color_range);
1908  if (fr.num && fr.den)
1909  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1910 
1911  if ((ret = avfilter_graph_create_filter(&filt_src,
1912  avfilter_get_by_name("buffer"),
1913  "ffplay_buffer", buffersrc_args, NULL,
1914  graph)) < 0)
1915  goto fail;
1916  par->hw_frames_ctx = frame->hw_frames_ctx;
1917  ret = av_buffersrc_parameters_set(filt_src, par);
1918  if (ret < 0)
1919  goto fail;
1920 
1921  ret = avfilter_graph_create_filter(&filt_out,
1922  avfilter_get_by_name("buffersink"),
1923  "ffplay_buffersink", NULL, NULL, graph);
1924  if (ret < 0)
1925  goto fail;
1926 
1927  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1928  goto fail;
1929  if (!vk_renderer &&
1931  goto fail;
1932 
1933  last_filter = filt_out;
1934 
1935 /* Note: this macro adds a filter before the most recently added filter, so the
1936  * processing order of the filters is reversed */
1937 #define INSERT_FILT(name, arg) do { \
1938  AVFilterContext *filt_ctx; \
1939  \
1940  ret = avfilter_graph_create_filter(&filt_ctx, \
1941  avfilter_get_by_name(name), \
1942  "ffplay_" name, arg, NULL, graph); \
1943  if (ret < 0) \
1944  goto fail; \
1945  \
1946  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1947  if (ret < 0) \
1948  goto fail; \
1949  \
1950  last_filter = filt_ctx; \
1951 } while (0)
1952 
1953  if (autorotate) {
1954  double theta = 0.0;
1955  int32_t *displaymatrix = NULL;
1957  if (sd)
1958  displaymatrix = (int32_t *)sd->data;
1959  if (!displaymatrix) {
1960  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1961  is->video_st->codecpar->nb_coded_side_data,
1963  if (psd)
1964  displaymatrix = (int32_t *)psd->data;
1965  }
1966  theta = get_rotation(displaymatrix);
1967 
1968  if (fabs(theta - 90) < 1.0) {
1969  INSERT_FILT("transpose", "clock");
1970  } else if (fabs(theta - 180) < 1.0) {
1971  INSERT_FILT("hflip", NULL);
1972  INSERT_FILT("vflip", NULL);
1973  } else if (fabs(theta - 270) < 1.0) {
1974  INSERT_FILT("transpose", "cclock");
1975  } else if (fabs(theta) > 1.0) {
1976  char rotate_buf[64];
1977  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1978  INSERT_FILT("rotate", rotate_buf);
1979  }
1980  }
1981 
1982  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1983  goto fail;
1984 
1985  is->in_video_filter = filt_src;
1986  is->out_video_filter = filt_out;
1987 
1988 fail:
1989  av_freep(&par);
1990  return ret;
1991 }
1992 
1993 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1994 {
1996  int sample_rates[2] = { 0, -1 };
1997  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1998  char aresample_swr_opts[512] = "";
1999  const AVDictionaryEntry *e = NULL;
2000  AVBPrint bp;
2001  char asrc_args[256];
2002  int ret;
2003 
2004  avfilter_graph_free(&is->agraph);
2005  if (!(is->agraph = avfilter_graph_alloc()))
2006  return AVERROR(ENOMEM);
2007  is->agraph->nb_threads = filter_nbthreads;
2008 
2010 
2011  while ((e = av_dict_iterate(swr_opts, e)))
2012  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2013  if (strlen(aresample_swr_opts))
2014  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2015  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2016 
2017  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2018 
2019  ret = snprintf(asrc_args, sizeof(asrc_args),
2020  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2021  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2022  1, is->audio_filter_src.freq, bp.str);
2023 
2024  ret = avfilter_graph_create_filter(&filt_asrc,
2025  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2026  asrc_args, NULL, is->agraph);
2027  if (ret < 0)
2028  goto end;
2029 
2030 
2031  ret = avfilter_graph_create_filter(&filt_asink,
2032  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2033  NULL, NULL, is->agraph);
2034  if (ret < 0)
2035  goto end;
2036 
2037  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2038  goto end;
2039  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2040  goto end;
2041 
2042  if (force_output_format) {
2043  av_bprint_clear(&bp);
2044  av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
2045  sample_rates [0] = is->audio_tgt.freq;
2046  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2047  goto end;
2048  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2049  goto end;
2050  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2051  goto end;
2052  }
2053 
2054 
2055  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2056  goto end;
2057 
2058  is->in_audio_filter = filt_asrc;
2059  is->out_audio_filter = filt_asink;
2060 
2061 end:
2062  if (ret < 0)
2063  avfilter_graph_free(&is->agraph);
2064  av_bprint_finalize(&bp, NULL);
2065 
2066  return ret;
2067 }
2068 
2069 static int audio_thread(void *arg)
2070 {
2071  VideoState *is = arg;
2072  AVFrame *frame = av_frame_alloc();
2073  Frame *af;
2074  int last_serial = -1;
2075  int reconfigure;
2076  int got_frame = 0;
2077  AVRational tb;
2078  int ret = 0;
2079 
2080  if (!frame)
2081  return AVERROR(ENOMEM);
2082 
2083  do {
2084  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2085  goto the_end;
2086 
2087  if (got_frame) {
2088  tb = (AVRational){1, frame->sample_rate};
2089 
2090  reconfigure =
2091  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2092  frame->format, frame->ch_layout.nb_channels) ||
2093  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2094  is->audio_filter_src.freq != frame->sample_rate ||
2095  is->auddec.pkt_serial != last_serial;
2096 
2097  if (reconfigure) {
2098  char buf1[1024], buf2[1024];
2099  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2100  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2101  av_log(NULL, AV_LOG_DEBUG,
2102  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2103  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2104  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2105 
2106  is->audio_filter_src.fmt = frame->format;
2107  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2108  if (ret < 0)
2109  goto the_end;
2110  is->audio_filter_src.freq = frame->sample_rate;
2111  last_serial = is->auddec.pkt_serial;
2112 
2113  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2114  goto the_end;
2115  }
2116 
2117  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2118  goto the_end;
2119 
2120  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2121  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2122  tb = av_buffersink_get_time_base(is->out_audio_filter);
2123  if (!(af = frame_queue_peek_writable(&is->sampq)))
2124  goto the_end;
2125 
2126  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2127  af->pos = fd ? fd->pkt_pos : -1;
2128  af->serial = is->auddec.pkt_serial;
2129  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2130 
2131  av_frame_move_ref(af->frame, frame);
2132  frame_queue_push(&is->sampq);
2133 
2134  if (is->audioq.serial != is->auddec.pkt_serial)
2135  break;
2136  }
2137  if (ret == AVERROR_EOF)
2138  is->auddec.finished = is->auddec.pkt_serial;
2139  }
2140  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2141  the_end:
2142  avfilter_graph_free(&is->agraph);
2143  av_frame_free(&frame);
2144  return ret;
2145 }
2146 
2147 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2148 {
2149  packet_queue_start(d->queue);
2150  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2151  if (!d->decoder_tid) {
2152  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2153  return AVERROR(ENOMEM);
2154  }
2155  return 0;
2156 }
2157 
2158 static int video_thread(void *arg)
2159 {
2160  VideoState *is = arg;
2161  AVFrame *frame = av_frame_alloc();
2162  double pts;
2163  double duration;
2164  int ret;
2165  AVRational tb = is->video_st->time_base;
2166  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2167 
2168  AVFilterGraph *graph = NULL;
2169  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2170  int last_w = 0;
2171  int last_h = 0;
2172  enum AVPixelFormat last_format = -2;
2173  int last_serial = -1;
2174  int last_vfilter_idx = 0;
2175 
2176  if (!frame)
2177  return AVERROR(ENOMEM);
2178 
2179  for (;;) {
2180  ret = get_video_frame(is, frame);
2181  if (ret < 0)
2182  goto the_end;
2183  if (!ret)
2184  continue;
2185 
2186  if ( last_w != frame->width
2187  || last_h != frame->height
2188  || last_format != frame->format
2189  || last_serial != is->viddec.pkt_serial
2190  || last_vfilter_idx != is->vfilter_idx) {
2191  av_log(NULL, AV_LOG_DEBUG,
2192  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2193  last_w, last_h,
2194  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2195  frame->width, frame->height,
2196  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2197  avfilter_graph_free(&graph);
2198  graph = avfilter_graph_alloc();
2199  if (!graph) {
2200  ret = AVERROR(ENOMEM);
2201  goto the_end;
2202  }
2203  graph->nb_threads = filter_nbthreads;
2204  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2205  SDL_Event event;
2206  event.type = FF_QUIT_EVENT;
2207  event.user.data1 = is;
2208  SDL_PushEvent(&event);
2209  goto the_end;
2210  }
2211  filt_in = is->in_video_filter;
2212  filt_out = is->out_video_filter;
2213  last_w = frame->width;
2214  last_h = frame->height;
2215  last_format = frame->format;
2216  last_serial = is->viddec.pkt_serial;
2217  last_vfilter_idx = is->vfilter_idx;
2218  frame_rate = av_buffersink_get_frame_rate(filt_out);
2219  }
2220 
2221  ret = av_buffersrc_add_frame(filt_in, frame);
2222  if (ret < 0)
2223  goto the_end;
2224 
2225  while (ret >= 0) {
2226  FrameData *fd;
2227 
2228  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2229 
2230  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2231  if (ret < 0) {
2232  if (ret == AVERROR_EOF)
2233  is->viddec.finished = is->viddec.pkt_serial;
2234  ret = 0;
2235  break;
2236  }
2237 
2238  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2239 
2240  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2241  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2242  is->frame_last_filter_delay = 0;
2243  tb = av_buffersink_get_time_base(filt_out);
2244  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
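/* e.g. a 25/1 frame rate gives a nominal frame duration of 1/25 = 0.04 s here;
 * when the frame rate is unknown the duration falls back to 0. */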
2245  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2246  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2247  av_frame_unref(frame);
2248  if (is->videoq.serial != is->viddec.pkt_serial)
2249  break;
2250  }
2251 
2252  if (ret < 0)
2253  goto the_end;
2254  }
2255  the_end:
2256  avfilter_graph_free(&graph);
2257  av_frame_free(&frame);
2258  return 0;
2259 }
2260 
2261 static int subtitle_thread(void *arg)
2262 {
2263  VideoState *is = arg;
2264  Frame *sp;
2265  int got_subtitle;
2266  double pts;
2267 
2268  for (;;) {
2269  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2270  return 0;
2271 
2272  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2273  break;
2274 
2275  pts = 0;
2276 
2277  if (got_subtitle && sp->sub.format == 0) {
2278  if (sp->sub.pts != AV_NOPTS_VALUE)
2279  pts = sp->sub.pts / (double)AV_TIME_BASE;
2280  sp->pts = pts;
2281  sp->serial = is->subdec.pkt_serial;
2282  sp->width = is->subdec.avctx->width;
2283  sp->height = is->subdec.avctx->height;
2284  sp->uploaded = 0;
2285 
2286  /* now we can update the picture count */
2287  frame_queue_push(&is->subpq);
2288  } else if (got_subtitle) {
2289  avsubtitle_free(&sp->sub);
2290  }
2291  }
2292  return 0;
2293 }
2294 
2295 /* copy samples for viewing in editor window */
2296 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2297 {
2298  int size, len;
2299 
2300  size = samples_size / sizeof(short);
2301  while (size > 0) {
2302  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2303  if (len > size)
2304  len = size;
2305  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2306  samples += len;
2307  is->sample_array_index += len;
2308  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2309  is->sample_array_index = 0;
2310  size -= len;
2311  }
2312 }
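/* sample_array above is used as a circular buffer: the index wraps back to 0 at
 * SAMPLE_ARRAY_SIZE, so the waveform/RDFT display always covers the most
 * recently played samples. */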
2313 
2314 /* return the wanted number of samples to get better sync if sync_type is video
2315  * or external master clock */
2316 static int synchronize_audio(VideoState *is, int nb_samples)
2317 {
2318  int wanted_nb_samples = nb_samples;
2319 
2320  /* if not master, then we try to remove or add samples to correct the clock */
2321  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2322  double diff, avg_diff;
2323  int min_nb_samples, max_nb_samples;
2324 
2325  diff = get_clock(&is->audclk) - get_master_clock(is);
2326 
2327  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2328  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2329  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2330  /* not enough measurements yet for a reliable estimate */
2331  is->audio_diff_avg_count++;
2332  } else {
2333  /* estimate the A-V difference */
2334  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2335 
2336  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2337  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2338  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2339  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2340  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2341  }
2342  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2343  diff, avg_diff, wanted_nb_samples - nb_samples,
2344  is->audio_clock, is->audio_diff_threshold);
2345  }
2346  } else {
2347  /* the difference is too large: probably initial PTS errors, so
2348  reset the A-V filter */
2349  is->audio_diff_avg_count = 0;
2350  is->audio_diff_cum = 0;
2351  }
2352  }
2353 
2354  return wanted_nb_samples;
2355 }
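/* Rough numeric sketch of the correction above (assumed values): with a 48 kHz
 * stream and a trusted drift of +10 ms, wanted_nb_samples becomes
 * nb_samples + 0.010 * 48000 = nb_samples + 480, then is clipped to within
 * SAMPLE_CORRECTION_PERCENT_MAX (10%), so a 1024-sample frame can grow or
 * shrink by at most about 102 samples per call. */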
2356 
2357 /**
2358  * Decode one audio frame and return its uncompressed size.
2359  *
2360  * The processed audio frame is decoded, converted if required, and
2361  * stored in is->audio_buf, with size in bytes given by the return
2362  * value.
2363  */
2364 static int audio_decode_frame(VideoState *is)
2365 {
2366  int data_size, resampled_data_size;
2367  av_unused double audio_clock0;
2368  int wanted_nb_samples;
2369  Frame *af;
2370 
2371  if (is->paused)
2372  return -1;
2373 
2374  do {
2375 #if defined(_WIN32)
2376  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2377  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2378  return -1;
2379  av_usleep (1000);
2380  }
2381 #endif
2382  if (!(af = frame_queue_peek_readable(&is->sampq)))
2383  return -1;
2384  frame_queue_next(&is->sampq);
2385  } while (af->serial != is->audioq.serial);
2386 
2387  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2388  af->frame->nb_samples,
2389  af->frame->format, 1);
2390 
2391  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2392 
2393  if (af->frame->format != is->audio_src.fmt ||
2394  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2395  af->frame->sample_rate != is->audio_src.freq ||
2396  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2397  swr_free(&is->swr_ctx);
2398  swr_alloc_set_opts2(&is->swr_ctx,
2399  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2400  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2401  0, NULL);
2402  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2403  av_log(NULL, AV_LOG_ERROR,
2404  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2405  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2406  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2407  swr_free(&is->swr_ctx);
2408  return -1;
2409  }
2410  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2411  return -1;
2412  is->audio_src.freq = af->frame->sample_rate;
2413  is->audio_src.fmt = af->frame->format;
2414  }
2415 
2416  if (is->swr_ctx) {
2417  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2418  uint8_t **out = &is->audio_buf1;
2419  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2420  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2421  int len2;
2422  if (out_size < 0) {
2423  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2424  return -1;
2425  }
2426  if (wanted_nb_samples != af->frame->nb_samples) {
2427  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2428  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2429  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2430  return -1;
2431  }
2432  }
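/* Hedged example with made-up numbers: to stretch a 1024-sample frame to 1034
 * samples at equal input/output rates, the sample_delta passed above is 10 and
 * the change is spread over a compensation_distance of 1034 output samples. */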
2433  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2434  if (!is->audio_buf1)
2435  return AVERROR(ENOMEM);
2436  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2437  if (len2 < 0) {
2438  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2439  return -1;
2440  }
2441  if (len2 == out_count) {
2442  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2443  if (swr_init(is->swr_ctx) < 0)
2444  swr_free(&is->swr_ctx);
2445  }
2446  is->audio_buf = is->audio_buf1;
2447  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2448  } else {
2449  is->audio_buf = af->frame->data[0];
2450  resampled_data_size = data_size;
2451  }
2452 
2453  audio_clock0 = is->audio_clock;
2454  /* update the audio clock with the pts */
2455  if (!isnan(af->pts))
2456  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2457  else
2458  is->audio_clock = NAN;
2459  is->audio_clock_serial = af->serial;
2460 #ifdef DEBUG
2461  {
2462  static double last_clock;
2463  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2464  is->audio_clock - last_clock,
2465  is->audio_clock, audio_clock0);
2466  last_clock = is->audio_clock;
2467  }
2468 #endif
2469  return resampled_data_size;
2470 }
2471 
2472 /* prepare a new audio buffer */
2473 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2474 {
2475  VideoState *is = opaque;
2476  int audio_size, len1;
2477 
2479 
2480  while (len > 0) {
2481  if (is->audio_buf_index >= is->audio_buf_size) {
2482  audio_size = audio_decode_frame(is);
2483  if (audio_size < 0) {
2484  /* if error, just output silence */
2485  is->audio_buf = NULL;
2486  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2487  } else {
2488  if (is->show_mode != SHOW_MODE_VIDEO)
2489  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2490  is->audio_buf_size = audio_size;
2491  }
2492  is->audio_buf_index = 0;
2493  }
2494  len1 = is->audio_buf_size - is->audio_buf_index;
2495  if (len1 > len)
2496  len1 = len;
2497  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2498  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2499  else {
2500  memset(stream, 0, len1);
2501  if (!is->muted && is->audio_buf)
2502  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2503  }
2504  len -= len1;
2505  stream += len1;
2506  is->audio_buf_index += len1;
2507  }
2508  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2509  /* Let's assume the audio driver that is used by SDL has two periods. */
2510  if (!isnan(is->audio_clock)) {
2511  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2512  sync_clock_to_slave(&is->extclk, &is->audclk);
2513  }
2514 }
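/* The clock update above subtracts the audio still queued towards the device:
 * two hardware buffers (2 * audio_hw_buf_size) plus the unplayed part of the
 * current software buffer, divided by bytes_per_sec, i.e. an estimate of the
 * output latency in seconds at the time of the callback. */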
2515 
2516 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2517 {
2518  SDL_AudioSpec wanted_spec, spec;
2519  const char *env;
2520  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2521  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2522  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2523  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2524 
2525  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2526  if (env) {
2527  wanted_nb_channels = atoi(env);
2528  av_channel_layout_uninit(wanted_channel_layout);
2529  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2530  }
2531  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2532  av_channel_layout_uninit(wanted_channel_layout);
2533  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2534  }
2535  wanted_nb_channels = wanted_channel_layout->nb_channels;
2536  wanted_spec.channels = wanted_nb_channels;
2537  wanted_spec.freq = wanted_sample_rate;
2538  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2539  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2540  return -1;
2541  }
2542  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2543  next_sample_rate_idx--;
2544  wanted_spec.format = AUDIO_S16SYS;
2545  wanted_spec.silence = 0;
2546  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
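/* e.g. for a 48000 Hz device: 48000 / SDL_AUDIO_MAX_CALLBACKS_PER_SEC(30) = 1600,
 * av_log2(1600) = 10, so samples = FFMAX(512, 2 << 10) = 2048 samples, giving
 * roughly 23 callbacks per second. */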
2547  wanted_spec.callback = sdl_audio_callback;
2548  wanted_spec.userdata = opaque;
2549  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2550  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2551  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2552  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2553  if (!wanted_spec.channels) {
2554  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2555  wanted_spec.channels = wanted_nb_channels;
2556  if (!wanted_spec.freq) {
2557  av_log(NULL, AV_LOG_ERROR,
2558  "No more combinations to try, audio open failed\n");
2559  return -1;
2560  }
2561  }
2562  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2563  }
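/* The retry loop above degrades gracefully: if SDL rejects the requested spec,
 * it first tries smaller channel counts from next_nb_channels[], and only once
 * those are exhausted steps down through next_sample_rates[] before failing. */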
2564  if (spec.format != AUDIO_S16SYS) {
2565  av_log(NULL, AV_LOG_ERROR,
2566  "SDL advised audio format %d is not supported!\n", spec.format);
2567  return -1;
2568  }
2569  if (spec.channels != wanted_spec.channels) {
2570  av_channel_layout_uninit(wanted_channel_layout);
2571  av_channel_layout_default(wanted_channel_layout, spec.channels);
2572  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2573  av_log(NULL, AV_LOG_ERROR,
2574  "SDL advised channel count %d is not supported!\n", spec.channels);
2575  return -1;
2576  }
2577  }
2578 
2579  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2580  audio_hw_params->freq = spec.freq;
2581  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2582  return -1;
2583  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2584  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2585  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2586  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2587  return -1;
2588  }
2589  return spec.size;
2590 }
2591 
2592 static int create_hwaccel(AVBufferRef **device_ctx)
2593 {
2594  enum AVHWDeviceType type;
2595  int ret;
2596  AVBufferRef *vk_dev;
2597 
2598  *device_ctx = NULL;
2599 
2600  if (!hwaccel)
2601  return 0;
2602 
2603  type = av_hwdevice_find_type_by_name(hwaccel);
2604  if (type == AV_HWDEVICE_TYPE_NONE)
2605  return AVERROR(ENOTSUP);
2606 
2607  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2608  if (ret < 0)
2609  return ret;
2610 
2611  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2612  if (!ret)
2613  return 0;
2614 
2615  if (ret != AVERROR(ENOSYS))
2616  return ret;
2617 
2618  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2619  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2620  return ret;
2621 }
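/* Sketch of the fallback above: with an active Vulkan renderer the requested
 * hwaccel device (e.g. "cuda") is first derived from the Vulkan device so
 * decoded frames can be shared; if derivation is unsupported (ENOSYS) a
 * standalone hardware device is created instead. */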
2622 
2623 /* open a given stream. Return 0 if OK */
2624 static int stream_component_open(VideoState *is, int stream_index)
2625 {
2626  AVFormatContext *ic = is->ic;
2627  AVCodecContext *avctx;
2628  const AVCodec *codec;
2629  const char *forced_codec_name = NULL;
2630  AVDictionary *opts = NULL;
2631  const AVDictionaryEntry *t = NULL;
2632  int sample_rate;
2633  AVChannelLayout ch_layout = { 0 };
2634  int ret = 0;
2635  int stream_lowres = lowres;
2636 
2637  if (stream_index < 0 || stream_index >= ic->nb_streams)
2638  return -1;
2639 
2640  avctx = avcodec_alloc_context3(NULL);
2641  if (!avctx)
2642  return AVERROR(ENOMEM);
2643 
2644  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2645  if (ret < 0)
2646  goto fail;
2647  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2648 
2649  codec = avcodec_find_decoder(avctx->codec_id);
2650 
2651  switch(avctx->codec_type){
2652  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2653  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2654  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2655  }
2656  if (forced_codec_name)
2657  codec = avcodec_find_decoder_by_name(forced_codec_name);
2658  if (!codec) {
2659  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2660  "No codec could be found with name '%s'\n", forced_codec_name);
2661  else av_log(NULL, AV_LOG_WARNING,
2662  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2663  ret = AVERROR(EINVAL);
2664  goto fail;
2665  }
2666 
2667  avctx->codec_id = codec->id;
2668  if (stream_lowres > codec->max_lowres) {
2669  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2670  codec->max_lowres);
2671  stream_lowres = codec->max_lowres;
2672  }
2673  avctx->lowres = stream_lowres;
2674 
2675  if (fast)
2676  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2677 
2678  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2679  ic->streams[stream_index], codec, &opts);
2680  if (ret < 0)
2681  goto fail;
2682 
2683  if (!av_dict_get(opts, "threads", NULL, 0))
2684  av_dict_set(&opts, "threads", "auto", 0);
2685  if (stream_lowres)
2686  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2687 
2688  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2689 
2690  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2691  ret = create_hwaccel(&avctx->hw_device_ctx);
2692  if (ret < 0)
2693  goto fail;
2694  }
2695 
2696  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2697  goto fail;
2698  }
2699  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2700  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2702  goto fail;
2703  }
2704 
2705  is->eof = 0;
2706  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2707  switch (avctx->codec_type) {
2708  case AVMEDIA_TYPE_AUDIO:
2709  {
2710  AVFilterContext *sink;
2711 
2712  is->audio_filter_src.freq = avctx->sample_rate;
2713  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2714  if (ret < 0)
2715  goto fail;
2716  is->audio_filter_src.fmt = avctx->sample_fmt;
2717  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2718  goto fail;
2719  sink = is->out_audio_filter;
2720  sample_rate = av_buffersink_get_sample_rate(sink);
2721  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2722  if (ret < 0)
2723  goto fail;
2724  }
2725 
2726  /* prepare audio output */
2727  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2728  goto fail;
2729  is->audio_hw_buf_size = ret;
2730  is->audio_src = is->audio_tgt;
2731  is->audio_buf_size = 0;
2732  is->audio_buf_index = 0;
2733 
2734  /* init averaging filter */
2735  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2736  is->audio_diff_avg_count = 0;
2737  /* since we do not have a precise enough measure of audio FIFO fullness,
2738  we correct audio sync only if the error is larger than this threshold */
2739  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
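/* audio_diff_avg_coef = exp(log(0.01) / 20) is about 0.794, i.e. an exponential
 * average in which the last AUDIO_DIFF_AVG_NB (20) measurements carry roughly
 * 99% of the weight; the threshold above is simply one hardware audio buffer
 * expressed in seconds. */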
2740 
2741  is->audio_stream = stream_index;
2742  is->audio_st = ic->streams[stream_index];
2743 
2744  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2745  goto fail;
2746  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2747  is->auddec.start_pts = is->audio_st->start_time;
2748  is->auddec.start_pts_tb = is->audio_st->time_base;
2749  }
2750  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2751  goto out;
2752  SDL_PauseAudioDevice(audio_dev, 0);
2753  break;
2754  case AVMEDIA_TYPE_VIDEO:
2755  is->video_stream = stream_index;
2756  is->video_st = ic->streams[stream_index];
2757 
2758  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2759  goto fail;
2760  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2761  goto out;
2762  is->queue_attachments_req = 1;
2763  break;
2764  case AVMEDIA_TYPE_SUBTITLE:
2765  is->subtitle_stream = stream_index;
2766  is->subtitle_st = ic->streams[stream_index];
2767 
2768  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2769  goto fail;
2770  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2771  goto out;
2772  break;
2773  default:
2774  break;
2775  }
2776  goto out;
2777 
2778 fail:
2779  avcodec_free_context(&avctx);
2780 out:
2781  av_channel_layout_uninit(&ch_layout);
2782  av_dict_free(&opts);
2783 
2784  return ret;
2785 }
2786 
2787 static int decode_interrupt_cb(void *ctx)
2788 {
2789  VideoState *is = ctx;
2790  return is->abort_request;
2791 }
2792 
2793 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2794  return stream_id < 0 ||
2795  queue->abort_request ||
2796  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2797  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2798 }
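/* Beyond the trivial cases (no stream, abort request, attached picture), a
 * queue counts as "full enough" once it holds more than MIN_FRAMES (25) packets
 * that either have unknown duration or together cover more than one second of
 * the stream, letting the read thread pause. */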
2799 
2800 static int is_realtime(AVFormatContext *s)
2801 {
2802  if( !strcmp(s->iformat->name, "rtp")
2803  || !strcmp(s->iformat->name, "rtsp")
2804  || !strcmp(s->iformat->name, "sdp")
2805  )
2806  return 1;
2807 
2808  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2809  || !strncmp(s->url, "udp:", 4)
2810  )
2811  )
2812  return 1;
2813  return 0;
2814 }
2815 
2816 /* this thread gets the stream from the disk or the network */
2817 static int read_thread(void *arg)
2818 {
2819  VideoState *is = arg;
2820  AVFormatContext *ic = NULL;
2821  int err, i, ret;
2822  int st_index[AVMEDIA_TYPE_NB];
2823  AVPacket *pkt = NULL;
2824  int64_t stream_start_time;
2825  int pkt_in_play_range = 0;
2826  const AVDictionaryEntry *t;
2827  SDL_mutex *wait_mutex = SDL_CreateMutex();
2828  int scan_all_pmts_set = 0;
2829  int64_t pkt_ts;
2830 
2831  if (!wait_mutex) {
2832  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2833  ret = AVERROR(ENOMEM);
2834  goto fail;
2835  }
2836 
2837  memset(st_index, -1, sizeof(st_index));
2838  is->eof = 0;
2839 
2840  pkt = av_packet_alloc();
2841  if (!pkt) {
2842  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2843  ret = AVERROR(ENOMEM);
2844  goto fail;
2845  }
2846  ic = avformat_alloc_context();
2847  if (!ic) {
2848  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2849  ret = AVERROR(ENOMEM);
2850  goto fail;
2851  }
2852  ic->interrupt_callback.callback = decode_interrupt_cb;
2853  ic->interrupt_callback.opaque = is;
2854  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2855  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2856  scan_all_pmts_set = 1;
2857  }
2858  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2859  if (err < 0) {
2860  print_error(is->filename, err);
2861  ret = -1;
2862  goto fail;
2863  }
2864  if (scan_all_pmts_set)
2865  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2866 
2867  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2868  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2869  ret = AVERROR_OPTION_NOT_FOUND;
2870  goto fail;
2871  }
2872  is->ic = ic;
2873 
2874  if (genpts)
2875  ic->flags |= AVFMT_FLAG_GENPTS;
2876 
2877  if (find_stream_info) {
2878  AVDictionary **opts;
2879  int orig_nb_streams = ic->nb_streams;
2880 
2881  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2882  if (err < 0) {
2883  av_log(NULL, AV_LOG_ERROR,
2884  "Error setting up avformat_find_stream_info() options\n");
2885  ret = err;
2886  goto fail;
2887  }
2888 
2889  err = avformat_find_stream_info(ic, opts);
2890 
2891  for (i = 0; i < orig_nb_streams; i++)
2892  av_dict_free(&opts[i]);
2893  av_freep(&opts);
2894 
2895  if (err < 0) {
2896  av_log(NULL, AV_LOG_WARNING,
2897  "%s: could not find codec parameters\n", is->filename);
2898  ret = -1;
2899  goto fail;
2900  }
2901  }
2902 
2903  if (ic->pb)
2904  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2905 
2906  if (seek_by_bytes < 0)
2907  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2908  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2909  strcmp("ogg", ic->iformat->name);
2910 
2911  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2912 
2913  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2914  window_title = av_asprintf("%s - %s", t->value, input_filename);
2915 
2916  /* if seeking requested, we execute it */
2917  if (start_time != AV_NOPTS_VALUE) {
2918  int64_t timestamp;
2919 
2920  timestamp = start_time;
2921  /* add the stream start time */
2922  if (ic->start_time != AV_NOPTS_VALUE)
2923  timestamp += ic->start_time;
2924  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2925  if (ret < 0) {
2926  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2927  is->filename, (double)timestamp / AV_TIME_BASE);
2928  }
2929  }
2930 
2931  is->realtime = is_realtime(ic);
2932 
2933  if (show_status)
2934  av_dump_format(ic, 0, is->filename, 0);
2935 
2936  for (i = 0; i < ic->nb_streams; i++) {
2937  AVStream *st = ic->streams[i];
2938  enum AVMediaType type = st->codecpar->codec_type;
2939  st->discard = AVDISCARD_ALL;
2940  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2941  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2942  st_index[type] = i;
2943  }
2944  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2945  if (wanted_stream_spec[i] && st_index[i] == -1) {
2946  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2947  st_index[i] = INT_MAX;
2948  }
2949  }
2950 
2951  if (!video_disable)
2952  st_index[AVMEDIA_TYPE_VIDEO] =
2953  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2954  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2955  if (!audio_disable)
2956  st_index[AVMEDIA_TYPE_AUDIO] =
2957  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2958  st_index[AVMEDIA_TYPE_AUDIO],
2959  st_index[AVMEDIA_TYPE_VIDEO],
2960  NULL, 0);
2961  if (!video_disable && !subtitle_disable)
2962  st_index[AVMEDIA_TYPE_SUBTITLE] =
2963  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2964  st_index[AVMEDIA_TYPE_SUBTITLE],
2965  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2966  st_index[AVMEDIA_TYPE_AUDIO] :
2967  st_index[AVMEDIA_TYPE_VIDEO]),
2968  NULL, 0);
2969 
2970  is->show_mode = show_mode;
2971  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2972  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2973  AVCodecParameters *codecpar = st->codecpar;
2974  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2975  if (codecpar->width)
2976  set_default_window_size(codecpar->width, codecpar->height, sar);
2977  }
2978 
2979  /* open the streams */
2980  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2981  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2982  }
2983 
2984  ret = -1;
2985  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2986  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2987  }
2988  if (is->show_mode == SHOW_MODE_NONE)
2989  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2990 
2991  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2992  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2993  }
2994 
2995  if (is->video_stream < 0 && is->audio_stream < 0) {
2996  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2997  is->filename);
2998  ret = -1;
2999  goto fail;
3000  }
3001 
3002  if (infinite_buffer < 0 && is->realtime)
3003  infinite_buffer = 1;
3004 
3005  for (;;) {
3006  if (is->abort_request)
3007  break;
3008  if (is->paused != is->last_paused) {
3009  is->last_paused = is->paused;
3010  if (is->paused)
3011  is->read_pause_return = av_read_pause(ic);
3012  else
3013  av_read_play(ic);
3014  }
3015 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3016  if (is->paused &&
3017  (!strcmp(ic->iformat->name, "rtsp") ||
3018  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3019  /* wait 10 ms to avoid trying to get another packet */
3020  /* XXX: horrible */
3021  SDL_Delay(10);
3022  continue;
3023  }
3024 #endif
3025  if (is->seek_req) {
3026  int64_t seek_target = is->seek_pos;
3027  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3028  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3029 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3030 // of the seek_pos/seek_rel variables
3031 
3032  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3033  if (ret < 0) {
3034  av_log(NULL, AV_LOG_ERROR,
3035  "%s: error while seeking\n", is->ic->url);
3036  } else {
3037  if (is->audio_stream >= 0)
3038  packet_queue_flush(&is->audioq);
3039  if (is->subtitle_stream >= 0)
3040  packet_queue_flush(&is->subtitleq);
3041  if (is->video_stream >= 0)
3042  packet_queue_flush(&is->videoq);
3043  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3044  set_clock(&is->extclk, NAN, 0);
3045  } else {
3046  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3047  }
3048  }
3049  is->seek_req = 0;
3050  is->queue_attachments_req = 1;
3051  is->eof = 0;
3052  if (is->paused)
3053  step_to_next_frame(is);
3054  }
3055  if (is->queue_attachments_req) {
3056  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3057  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3058  goto fail;
3059  packet_queue_put(&is->videoq, pkt);
3060  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3061  }
3062  is->queue_attachments_req = 0;
3063  }
3064 
3065  /* if the queues are full, no need to read more */
3066  if (infinite_buffer<1 &&
3067  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3068  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3069  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3070  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3071  /* wait 10 ms */
3072  SDL_LockMutex(wait_mutex);
3073  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3074  SDL_UnlockMutex(wait_mutex);
3075  continue;
3076  }
3077  if (!is->paused &&
3078  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3079  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3080  if (loop != 1 && (!loop || --loop)) {
3081  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3082  } else if (autoexit) {
3083  ret = AVERROR_EOF;
3084  goto fail;
3085  }
3086  }
3087  ret = av_read_frame(ic, pkt);
3088  if (ret < 0) {
3089  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3090  if (is->video_stream >= 0)
3091  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3092  if (is->audio_stream >= 0)
3093  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3094  if (is->subtitle_stream >= 0)
3095  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3096  is->eof = 1;
3097  }
3098  if (ic->pb && ic->pb->error) {
3099  if (autoexit)
3100  goto fail;
3101  else
3102  break;
3103  }
3104  SDL_LockMutex(wait_mutex);
3105  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3106  SDL_UnlockMutex(wait_mutex);
3107  continue;
3108  } else {
3109  is->eof = 0;
3110  }
3111  /* check if packet is in play range specified by user, then queue, otherwise discard */
3112  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3113  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3114  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3115  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3116  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3117  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3118  <= ((double)duration / 1000000);
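/* The test above converts the packet timestamp to seconds relative to the
 * stream start, subtracts the -ss start offset, and compares the result with
 * the -t play duration; with no -t given (duration == AV_NOPTS_VALUE) every
 * packet is considered in range. */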
3119  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3120  packet_queue_put(&is->audioq, pkt);
3121  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3122  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3123  packet_queue_put(&is->videoq, pkt);
3124  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3125  packet_queue_put(&is->subtitleq, pkt);
3126  } else {
3127  av_packet_unref(pkt);
3128  }
3129  }
3130 
3131  ret = 0;
3132  fail:
3133  if (ic && !is->ic)
3134  avformat_close_input(&ic);
3135 
3136  av_packet_free(&pkt);
3137  if (ret != 0) {
3138  SDL_Event event;
3139 
3140  event.type = FF_QUIT_EVENT;
3141  event.user.data1 = is;
3142  SDL_PushEvent(&event);
3143  }
3144  SDL_DestroyMutex(wait_mutex);
3145  return 0;
3146 }
3147 
3148 static VideoState *stream_open(const char *filename,
3149  const AVInputFormat *iformat)
3150 {
3151  VideoState *is;
3152 
3153  is = av_mallocz(sizeof(VideoState));
3154  if (!is)
3155  return NULL;
3156  is->last_video_stream = is->video_stream = -1;
3157  is->last_audio_stream = is->audio_stream = -1;
3158  is->last_subtitle_stream = is->subtitle_stream = -1;
3159  is->filename = av_strdup(filename);
3160  if (!is->filename)
3161  goto fail;
3162  is->iformat = iformat;
3163  is->ytop = 0;
3164  is->xleft = 0;
3165 
3166  /* start video display */
3167  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3168  goto fail;
3169  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3170  goto fail;
3171  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3172  goto fail;
3173 
3174  if (packet_queue_init(&is->videoq) < 0 ||
3175  packet_queue_init(&is->audioq) < 0 ||
3176  packet_queue_init(&is->subtitleq) < 0)
3177  goto fail;
3178 
3179  if (!(is->continue_read_thread = SDL_CreateCond())) {
3180  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3181  goto fail;
3182  }
3183 
3184  init_clock(&is->vidclk, &is->videoq.serial);
3185  init_clock(&is->audclk, &is->audioq.serial);
3186  init_clock(&is->extclk, &is->extclk.serial);
3187  is->audio_clock_serial = -1;
3188  if (startup_volume < 0)
3189  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3190  if (startup_volume > 100)
3191  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3193  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3194  is->audio_volume = startup_volume;
3195  is->muted = 0;
3196  is->av_sync_type = av_sync_type;
3197  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3198  if (!is->read_tid) {
3199  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3200 fail:
3201  stream_close(is);
3202  return NULL;
3203  }
3204  return is;
3205 }
3206 
3207 static void stream_cycle_channel(VideoState *is, int codec_type)
3208 {
3209  AVFormatContext *ic = is->ic;
3210  int start_index, stream_index;
3211  int old_index;
3212  AVStream *st;
3213  AVProgram *p = NULL;
3214  int nb_streams = is->ic->nb_streams;
3215 
3216  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3217  start_index = is->last_video_stream;
3218  old_index = is->video_stream;
3219  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3220  start_index = is->last_audio_stream;
3221  old_index = is->audio_stream;
3222  } else {
3223  start_index = is->last_subtitle_stream;
3224  old_index = is->subtitle_stream;
3225  }
3226  stream_index = start_index;
3227 
3228  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3229  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3230  if (p) {
3231  nb_streams = p->nb_stream_indexes;
3232  for (start_index = 0; start_index < nb_streams; start_index++)
3233  if (p->stream_index[start_index] == stream_index)
3234  break;
3235  if (start_index == nb_streams)
3236  start_index = -1;
3237  stream_index = start_index;
3238  }
3239  }
3240 
3241  for (;;) {
3242  if (++stream_index >= nb_streams)
3243  {
3244  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3245  {
3246  stream_index = -1;
3247  is->last_subtitle_stream = -1;
3248  goto the_end;
3249  }
3250  if (start_index == -1)
3251  return;
3252  stream_index = 0;
3253  }
3254  if (stream_index == start_index)
3255  return;
3256  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3257  if (st->codecpar->codec_type == codec_type) {
3258  /* check that parameters are OK */
3259  switch (codec_type) {
3260  case AVMEDIA_TYPE_AUDIO:
3261  if (st->codecpar->sample_rate != 0 &&
3262  st->codecpar->ch_layout.nb_channels != 0)
3263  goto the_end;
3264  break;
3265  case AVMEDIA_TYPE_VIDEO:
3266  case AVMEDIA_TYPE_SUBTITLE:
3267  goto the_end;
3268  default:
3269  break;
3270  }
3271  }
3272  }
3273  the_end:
3274  if (p && stream_index != -1)
3275  stream_index = p->stream_index[stream_index];
3276  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3278  old_index,
3279  stream_index);
3280 
3281  stream_component_close(is, old_index);
3282  stream_component_open(is, stream_index);
3283 }
3284 
3285 
3286 static void toggle_full_screen(VideoState *is)
3287 {
3288  is_full_screen = !is_full_screen;
3289  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3290 }
3291 
3292 static void toggle_audio_display(VideoState *is)
3293 {
3294  int next = is->show_mode;
3295  do {
3296  next = (next + 1) % SHOW_MODE_NB;
3297  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3298  if (is->show_mode != next) {
3299  is->force_refresh = 1;
3300  is->show_mode = next;
3301  }
3302 }
3303 
3304 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3305  double remaining_time = 0.0;
3306  SDL_PumpEvents();
3307  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3308  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3309  SDL_ShowCursor(0);
3310  cursor_hidden = 1;
3311  }
3312  if (remaining_time > 0.0)
3313  av_usleep((int64_t)(remaining_time * 1000000.0));
3314  remaining_time = REFRESH_RATE;
3315  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3316  video_refresh(is, &remaining_time);
3317  SDL_PumpEvents();
3318  }
3319 }
3320 
3321 static void seek_chapter(VideoState *is, int incr)
3322 {
3323  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3324  int i;
3325 
3326  if (!is->ic->nb_chapters)
3327  return;
3328 
3329  /* find the current chapter */
3330  for (i = 0; i < is->ic->nb_chapters; i++) {
3331  AVChapter *ch = is->ic->chapters[i];
3332  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3333  i--;
3334  break;
3335  }
3336  }
3337 
3338  i += incr;
3339  i = FFMAX(i, 0);
3340  if (i >= is->ic->nb_chapters)
3341  return;
3342 
3343  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3344  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3345  AV_TIME_BASE_Q), 0, 0);
3346 }
3347 
3348 /* handle an event sent by the GUI */
3349 static void event_loop(VideoState *cur_stream)
3350 {
3351  SDL_Event event;
3352  double incr, pos, frac;
3353 
3354  for (;;) {
3355  double x;
3356  refresh_loop_wait_event(cur_stream, &event);
3357  switch (event.type) {
3358  case SDL_KEYDOWN:
3359  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3360  do_exit(cur_stream);
3361  break;
3362  }
3363  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3364  if (!cur_stream->width)
3365  continue;
3366  switch (event.key.keysym.sym) {
3367  case SDLK_f:
3368  toggle_full_screen(cur_stream);
3369  cur_stream->force_refresh = 1;
3370  break;
3371  case SDLK_p:
3372  case SDLK_SPACE:
3373  toggle_pause(cur_stream);
3374  break;
3375  case SDLK_m:
3376  toggle_mute(cur_stream);
3377  break;
3378  case SDLK_KP_MULTIPLY:
3379  case SDLK_0:
3380  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3381  break;
3382  case SDLK_KP_DIVIDE:
3383  case SDLK_9:
3384  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3385  break;
3386  case SDLK_s: // S: Step to next frame
3387  step_to_next_frame(cur_stream);
3388  break;
3389  case SDLK_a:
3390  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3391  break;
3392  case SDLK_v:
3393  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3394  break;
3395  case SDLK_c:
3396  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3397  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3398  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3399  break;
3400  case SDLK_t:
3401  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3402  break;
3403  case SDLK_w:
3404  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3405  if (++cur_stream->vfilter_idx >= nb_vfilters)
3406  cur_stream->vfilter_idx = 0;
3407  } else {
3408  cur_stream->vfilter_idx = 0;
3409  toggle_audio_display(cur_stream);
3410  }
3411  break;
3412  case SDLK_PAGEUP:
3413  if (cur_stream->ic->nb_chapters <= 1) {
3414  incr = 600.0;
3415  goto do_seek;
3416  }
3417  seek_chapter(cur_stream, 1);
3418  break;
3419  case SDLK_PAGEDOWN:
3420  if (cur_stream->ic->nb_chapters <= 1) {
3421  incr = -600.0;
3422  goto do_seek;
3423  }
3424  seek_chapter(cur_stream, -1);
3425  break;
3426  case SDLK_LEFT:
3427  incr = seek_interval ? -seek_interval : -10.0;
3428  goto do_seek;
3429  case SDLK_RIGHT:
3430  incr = seek_interval ? seek_interval : 10.0;
3431  goto do_seek;
3432  case SDLK_UP:
3433  incr = 60.0;
3434  goto do_seek;
3435  case SDLK_DOWN:
3436  incr = -60.0;
3437  do_seek:
3438  if (seek_by_bytes) {
3439  pos = -1;
3440  if (pos < 0 && cur_stream->video_stream >= 0)
3441  pos = frame_queue_last_pos(&cur_stream->pictq);
3442  if (pos < 0 && cur_stream->audio_stream >= 0)
3443  pos = frame_queue_last_pos(&cur_stream->sampq);
3444  if (pos < 0)
3445  pos = avio_tell(cur_stream->ic->pb);
3446  if (cur_stream->ic->bit_rate)
3447  incr *= cur_stream->ic->bit_rate / 8.0;
3448  else
3449  incr *= 180000.0;
3450  pos += incr;
3451  stream_seek(cur_stream, pos, incr, 1);
3452  } else {
3453  pos = get_master_clock(cur_stream);
3454  if (isnan(pos))
3455  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3456  pos += incr;
3457  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3458  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3459  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3460  }
3461  break;
3462  default:
3463  break;
3464  }
3465  break;
3466  case SDL_MOUSEBUTTONDOWN:
3467  if (exit_on_mousedown) {
3468  do_exit(cur_stream);
3469  break;
3470  }
3471  if (event.button.button == SDL_BUTTON_LEFT) {
3472  static int64_t last_mouse_left_click = 0;
3473  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3474  toggle_full_screen(cur_stream);
3475  cur_stream->force_refresh = 1;
3476  last_mouse_left_click = 0;
3477  } else {
3478  last_mouse_left_click = av_gettime_relative();
3479  }
3480  }
3481  case SDL_MOUSEMOTION:
3482  if (cursor_hidden) {
3483  SDL_ShowCursor(1);
3484  cursor_hidden = 0;
3485  }
3486  cursor_last_shown = av_gettime_relative();
3487  if (event.type == SDL_MOUSEBUTTONDOWN) {
3488  if (event.button.button != SDL_BUTTON_RIGHT)
3489  break;
3490  x = event.button.x;
3491  } else {
3492  if (!(event.motion.state & SDL_BUTTON_RMASK))
3493  break;
3494  x = event.motion.x;
3495  }
3496  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3497  uint64_t size = avio_size(cur_stream->ic->pb);
3498  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3499  } else {
3500  int64_t ts;
3501  int ns, hh, mm, ss;
3502  int tns, thh, tmm, tss;
3503  tns = cur_stream->ic->duration / 1000000LL;
3504  thh = tns / 3600;
3505  tmm = (tns % 3600) / 60;
3506  tss = (tns % 60);
3507  frac = x / cur_stream->width;
3508  ns = frac * tns;
3509  hh = ns / 3600;
3510  mm = (ns % 3600) / 60;
3511  ss = (ns % 60);
3512  av_log(NULL, AV_LOG_INFO,
3513  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3514  hh, mm, ss, thh, tmm, tss);
3515  ts = frac * cur_stream->ic->duration;
3516  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3517  ts += cur_stream->ic->start_time;
3518  stream_seek(cur_stream, ts, 0, 0);
3519  }
3520  break;
3521  case SDL_WINDOWEVENT:
3522  switch (event.window.event) {
3523  case SDL_WINDOWEVENT_SIZE_CHANGED:
3524  screen_width = cur_stream->width = event.window.data1;
3525  screen_height = cur_stream->height = event.window.data2;
3526  if (cur_stream->vis_texture) {
3527  SDL_DestroyTexture(cur_stream->vis_texture);
3528  cur_stream->vis_texture = NULL;
3529  }
3530  if (vk_renderer)
3531  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3532  case SDL_WINDOWEVENT_EXPOSED:
3533  cur_stream->force_refresh = 1;
3534  }
3535  break;
3536  case SDL_QUIT:
3537  case FF_QUIT_EVENT:
3538  do_exit(cur_stream);
3539  break;
3540  default:
3541  break;
3542  }
3543  }
3544 }
3545 
3546 static int opt_width(void *optctx, const char *opt, const char *arg)
3547 {
3548  double num;
3549  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3550  if (ret < 0)
3551  return ret;
3552 
3553  screen_width = num;
3554  return 0;
3555 }
3556 
3557 static int opt_height(void *optctx, const char *opt, const char *arg)
3558 {
3559  double num;
3560  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3561  if (ret < 0)
3562  return ret;
3563 
3564  screen_height = num;
3565  return 0;
3566 }
3567 
3568 static int opt_format(void *optctx, const char *opt, const char *arg)
3569 {
3570  file_iformat = av_find_input_format(arg);
3571  if (!file_iformat) {
3572  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3573  return AVERROR(EINVAL);
3574  }
3575  return 0;
3576 }
3577 
3578 static int opt_sync(void *optctx, const char *opt, const char *arg)
3579 {
3580  if (!strcmp(arg, "audio"))
3582  else if (!strcmp(arg, "video"))
3584  else if (!strcmp(arg, "ext"))
3586  else {
3587  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3588  exit(1);
3589  }
3590  return 0;
3591 }
3592 
3593 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3594 {
3595  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3596  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3597  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3598 
3599  if (show_mode == SHOW_MODE_NONE) {
3600  double num;
3601  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3602  if (ret < 0)
3603  return ret;
3604  show_mode = num;
3605  }
3606  return 0;
3607 }
3608 
3609 static int opt_input_file(void *optctx, const char *filename)
3610 {
3611  if (input_filename) {
3612  av_log(NULL, AV_LOG_FATAL,
3613  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3614  filename, input_filename);
3615  return AVERROR(EINVAL);
3616  }
3617  if (!strcmp(filename, "-"))
3618  filename = "fd:";
3619  input_filename = av_strdup(filename);
3620  if (!input_filename)
3621  return AVERROR(ENOMEM);
3622 
3623  return 0;
3624 }
3625 
3626 static int opt_codec(void *optctx, const char *opt, const char *arg)
3627 {
3628  const char *spec = strchr(opt, ':');
3629  const char **name;
3630  if (!spec) {
3631  av_log(NULL, AV_LOG_ERROR,
3632  "No media specifier was specified in '%s' in option '%s'\n",
3633  arg, opt);
3634  return AVERROR(EINVAL);
3635  }
3636  spec++;
3637 
3638  switch (spec[0]) {
3639  case 'a' : name = &audio_codec_name; break;
3640  case 's' : name = &subtitle_codec_name; break;
3641  case 'v' : name = &video_codec_name; break;
3642  default:
3643  av_log(NULL, AV_LOG_ERROR,
3644  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3645  return AVERROR(EINVAL);
3646  }
3647 
3648  av_freep(name);
3649  *name = av_strdup(arg);
3650  return *name ? 0 : AVERROR(ENOMEM);
3651 }
3652 
3653 static int dummy;
3654 
3655 static const OptionDef options[] = {
3656  CMDUTILS_COMMON_OPTIONS
3657  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3658  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3659  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3660  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3661  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3662  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3663  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3664  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3665  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3666  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3667  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3668  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3669  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3670  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3671  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3672  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3673  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3674  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3675  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3676  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3677  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3678  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3679  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3680  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3681  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3682  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3683  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3684  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3685  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3686  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3687  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3688  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3689  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3690  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3691  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3692  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3693  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3694  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3695  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3696  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3697  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3698  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3699  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3700  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3701  "read and decode the streams to fill missing information with heuristics" },
3702  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3703  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3704  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3705  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3706  { NULL, },
3707 };
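For readers new to the fftools option tables, here is a minimal sketch of what one entry in the table above is doing, assuming the OptionDef field order used by cmdutils.h (name, value type, flags, destination or callback, help text, optional argument name). The option name and destination variable below are invented for illustration and are not part of ffplay.c.

static int example_enabled;                      /* destination written by parse_options() */

static const OptionDef example_options[] = {
    /*  name       type            flags  destination            help text            */
    { "example",  OPT_TYPE_BOOL,   0,     { &example_enabled },  "toggle the example" },
    { NULL, },                                   /* table terminator, as in the real table */
};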
3708 
3709 static void show_usage(void)
3710 {
3711  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3712  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3713  av_log(NULL, AV_LOG_INFO, "\n");
3714 }
3715 
3716 void show_help_default(const char *opt, const char *arg)
3717 {
3718  av_log_set_callback(log_callback_help);
3719  show_usage();
3720  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3721  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3722  printf("\n");
3723  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3724  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3725  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3726  printf("\nWhile playing:\n"
3727  "q, ESC quit\n"
3728  "f toggle full screen\n"
3729  "p, SPC pause\n"
3730  "m toggle mute\n"
3731  "9, 0 decrease and increase volume respectively\n"
3732  "/, * decrease and increase volume respectively\n"
3733  "a cycle audio channel in the current program\n"
3734  "v cycle video channel\n"
3735  "t cycle subtitle channel in the current program\n"
3736  "c cycle program\n"
3737  "w cycle video filters or show modes\n"
3738  "s activate frame-step mode\n"
3739  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3740  "down/up seek backward/forward 1 minute\n"
3741  "page down/page up seek backward/forward 10 minutes\n"
3742  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3743  "left double-click toggle full screen\n"
3744  );
3745 }
3746 
3747 /* Called from the main */
3748 int main(int argc, char **argv)
3749 {
3750  int flags, ret;
3751  VideoState *is;
3752 
3753  init_dynload();
3754 
3755  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3756  parse_loglevel(argc, argv, options);
3757 
3758  /* register all codecs, demux and protocols */
3759 #if CONFIG_AVDEVICE
3760  avdevice_register_all();
3761 #endif
3762  avformat_network_init();
3763 
3764  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3765  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3766 
3767  show_banner(argc, argv, options);
3768 
3769  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3770  if (ret < 0)
3771  exit(ret == AVERROR_EXIT ? 0 : 1);
3772 
3773  if (!input_filename) {
3774  show_usage();
3775  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3776  av_log(NULL, AV_LOG_FATAL,
3777  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3778  exit(1);
3779  }
3780 
3781  if (display_disable) {
3782  video_disable = 1;
3783  }
3784  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3785  if (audio_disable)
3786  flags &= ~SDL_INIT_AUDIO;
3787  else {
3788  /* Try to work around an occasional ALSA buffer underflow issue when the
3789  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3790  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3791  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3792  }
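A note on the SDL calls in the workaround above: SDL_setenv()'s third argument is an overwrite flag, and it is the preceding SDL_getenv() check that keeps a value the user has already exported from being replaced. A minimal sketch of the same guarded-default pattern, using a made-up variable name:

#include <SDL.h>

/* Set a default only if the user has not already provided a value;
 * "SDL_EXAMPLE_TUNABLE" is a hypothetical name used for illustration. */
static void set_env_default(const char *value)
{
    if (!SDL_getenv("SDL_EXAMPLE_TUNABLE"))
        SDL_setenv("SDL_EXAMPLE_TUNABLE", value, 1 /* overwrite allowed */);
}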
3793  if (display_disable)
3794  flags &= ~SDL_INIT_VIDEO;
3795  if (SDL_Init (flags)) {
3796  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3797  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3798  exit(1);
3799  }
3800 
3801  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3802  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3803 
3804  if (!display_disable) {
3805  int flags = SDL_WINDOW_HIDDEN;
3806  if (alwaysontop)
3807 #if SDL_VERSION_ATLEAST(2,0,5)
3808  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3809 #else
3810  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3811 #endif
3812  if (borderless)
3813  flags |= SDL_WINDOW_BORDERLESS;
3814  else
3815  flags |= SDL_WINDOW_RESIZABLE;
3816 
3817 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3818  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3819 #endif
3820  if (hwaccel && !enable_vulkan) {
3821  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3822  enable_vulkan = 1;
3823  }
3824  if (enable_vulkan) {
3825  vk_renderer = vk_get_renderer();
3826  if (vk_renderer) {
3827 #if SDL_VERSION_ATLEAST(2, 0, 6)
3828  flags |= SDL_WINDOW_VULKAN;
3829 #endif
3830  } else {
3831  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3832  enable_vulkan = 0;
3833  }
3834  }
3835  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3836  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3837  if (!window) {
3838  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3839  do_exit(NULL);
3840  }
3841 
3842  if (vk_renderer) {
3843  AVDictionary *dict = NULL;
3844 
3845  if (vulkan_params)
3846  av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3847  ret = vk_renderer_create(vk_renderer, window, dict);
3848  av_dict_free(&dict);
3849  if (ret < 0) {
3850  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3851  do_exit(NULL);
3852  }
3853  } else {
3854  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3855  if (!renderer) {
3856  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3857  renderer = SDL_CreateRenderer(window, -1, 0);
3858  }
3859  if (renderer) {
3860  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3861  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3862  }
3863  if (!renderer || !renderer_info.num_texture_formats) {
3864  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3865  do_exit(NULL);
3866  }
3867  }
3868  }
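In the vk_renderer branch above, the -vulkan_params string is turned into an AVDictionary by av_dict_parse_string(), with '=' separating key from value and ':' separating pairs, and the resulting dictionary is handed to vk_renderer_create(). Below is a small self-contained sketch of just that parsing step; the keys and values are invented and not necessarily options the renderer understands.

#include <stdio.h>
#include "libavutil/dict.h"

static void dump_params(const char *params)    /* e.g. "debug=1:device=0" */
{
    AVDictionary *dict = NULL;
    const AVDictionaryEntry *e = NULL;

    if (av_dict_parse_string(&dict, params, "=", ":", 0) < 0) {
        fprintf(stderr, "could not parse '%s'\n", params);
        return;
    }
    while ((e = av_dict_get(dict, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s -> %s\n", e->key, e->value);
    av_dict_free(&dict);
}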
3869 
3870  is = stream_open(input_filename, file_iformat);
3871  if (!is) {
3872  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3873  do_exit(NULL);
3874  }
3875 
3876  event_loop(is);
3877 
3878  /* never returns */
3879 
3880  return 0;
3881 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:522
AVSubtitle
Definition: avcodec.h:2227
rect::w
int w
Definition: f_ebur128.c:78
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2500
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1306
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:215
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:427
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:110
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:285
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
Frame::width
int width
Definition: ffplay.c:162
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:487
av_clip
#define av_clip
Definition: common.h:98
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:266
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:135
av_sync_type
static int av_sync_type
Definition: ffplay.c:328
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:366
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:151
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1110
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:805
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:300
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:1993
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:397
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:789
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:194
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:593
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1184
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:811
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:443
out
FILE * out
Definition: movenc.c:54
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:267
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1050
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2158
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:234
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:858
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1338
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:453
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:87
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1355
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:176
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:244
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:582
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:674
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:179
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:814
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:392
display_disable
static int display_disable
Definition: ffplay.c:323
screen_width
static int screen_width
Definition: ffplay.c:313
ffplay_renderer.h
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:59
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:903
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:105
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:78
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1524
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:479
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
VideoState::auddec
Decoder auddec
Definition: ffplay.c:228
screen_left
static int screen_left
Definition: ffplay.c:315
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:806
AudioParams::frame_size
int frame_size
Definition: ffplay.c:136
AVSubtitleRect
Definition: avcodec.h:2200
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:199
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2147
rect::y
int y
Definition: f_ebur128.c:78
FrameQueue::size
int size
Definition: ffplay.c:174
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:196
av_unused
#define av_unused
Definition: attributes.h:131
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:165
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:273
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1756
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:375
AudioParams
Definition: ffplay.c:132
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:278
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:254
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1323
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:283
AVFrame::width
int width
Definition: frame.h:447
VideoState::xleft
int xleft
Definition: ffplay.c:292
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:373
Frame::pts
double pts
Definition: ffplay.c:159
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:683
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:168
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:692
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:343
parse_number
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:87
AV_HWDEVICE_TYPE_NONE
@ AV_HWDEVICE_TYPE_NONE
Definition: hwcontext.h:28
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:69
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:174
vk_renderer_create
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
Definition: ffplay_renderer.c:811
AVChapter::start
int64_t start
Definition: avformat.h:1217
Clock
Definition: ffplay.c:140
data
const char data[16]
Definition: mxf.c:148
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:129
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:64
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:191
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:61
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2434
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:239
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:452
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:239
av_hwdevice_find_type_by_name
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
Definition: hwcontext.c:102
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:107
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:78
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:143
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:351
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:540
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:373
video_disable
static int video_disable
Definition: ffplay.c:318
Frame::uploaded
int uploaded
Definition: ffplay.c:166
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1205
AVDictionary
Definition: dict.c:34
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1539
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:155
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3609
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:186
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1270
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1525
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:75
vk_renderer_destroy
void vk_renderer_destroy(VkRenderer *renderer)
Definition: ffplay_renderer.c:832
VideoState::paused
int paused
Definition: ffplay.c:209
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1420
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:322
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1453
VideoState::width
int width
Definition: ffplay.c:292
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:308
sample_rate
sample_rate
Definition: ffmpeg_filter.c:410
dummy
static int dummy
Definition: ffplay.c:3653
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:362
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:74
PacketQueue
Definition: ffplay.c:116
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:902
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2261
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:590
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:396
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:302
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:261
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
OptionDef
Definition: cmdutils.h:126
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2364
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:319
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:224
genpts
static int genpts
Definition: ffplay.c:332
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:256
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3578
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1531
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:909
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:226
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:372
FrameQueue::rindex
int rindex
Definition: ffplay.c:172
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1374
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:207
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:362
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1528
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:73
startup_volume
static int startup_volume
Definition: ffplay.c:326
window
static SDL_Window * window
Definition: ffplay.c:364
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3286
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:147
VideoState::extclk
Clock extclk
Definition: ffplay.c:222
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:213
alwaysontop
static int alwaysontop
Definition: ffplay.c:325
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:242
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:473
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:98
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1065
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
FrameQueue
Definition: ffplay.c:170
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:444
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2201
VideoState::video_stream
int video_stream
Definition: ffplay.c:284
autoexit
static int autoexit
Definition: ffplay.c:335
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:494
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1214
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:966
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:776
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3593
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:196
pts
static int64_t pts
Definition: transcode_aac.c:643
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1414
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:240
OPT_TYPE_FLOAT
@ OPT_TYPE_FLOAT
Definition: cmdutils.h:86
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:740
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:238
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:302
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:331
loop
static int loop
Definition: ffplay.c:338
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:558
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:268
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
Definition: swresample.c:718
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3557
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:393
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1397
is_full_screen
static int is_full_screen
Definition: ffplay.c:359
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:929
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:950
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:167
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:59
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1490
vk_get_renderer
VkRenderer * vk_get_renderer(void)
Definition: ffplay_renderer.c:804
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:548
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2069
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1408
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:760
VideoState
Definition: ffplay.c:204
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:737
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2473
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1428
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:139
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:214
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:725
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:644
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:260
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:192
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3716
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
default_height
static int default_height
Definition: ffplay.c:312
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1406
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:144
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:553
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1267
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:456
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:616
AVDictionaryEntry::key
char * key
Definition: dict.h:90
Clock::last_updated
double last_updated
Definition: ffplay.c:143
PacketQueue::duration
int64_t duration
Definition: ffplay.c:120
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2202
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:134
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:864
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:314
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:95
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:193
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1863
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:695
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1051
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:182
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3626
AVPacketSideData::data
uint8_t * data
Definition: packet.h:374
Clock::pts_drift
double pts_drift
Definition: ffplay.c:142
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:286
ctx
AVFormatContext * ctx
Definition: movenc.c:48
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:727
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:185
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:101
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:177
nb_streams
static int nb_streams
Definition: ffprobe.c:383
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
vk_renderer_get_hw_dev
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
Definition: ffplay_renderer.c:817
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:208
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1388
vk_renderer_display
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame)
Definition: ffplay_renderer.c:822
screen_top
static int screen_top
Definition: ffplay.c:316
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:241
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:93
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:365
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst)
Filter out options for given codec.
Definition: cmdutils.c:990
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1569
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:454
VideoState::step
int step
Definition: ffplay.c:293
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2316
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:310
Clock::speed
double speed
Definition: ffplay.c:144
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:184
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:261
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:442
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:629
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:78
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2203
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3321
VkRenderer
Definition: ffplay_renderer.c:48
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1436
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:186
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:713
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1243
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:782
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:999
FrameQueue::max_size
int max_size
Definition: ffplay.c:175
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:142
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
Decoder
Definition: ffmpeg.h:333
AudioParams::freq
int freq
Definition: ffplay.c:133
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:134
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2516
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3207
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:258
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:357
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1297
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:452
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:686
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:171
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:265
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:195
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:346
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1183
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:732
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:71
Frame::duration
double duration
Definition: ffplay.c:160
lowres
static int lowres
Definition: ffplay.c:333
double
double
Definition: af_crystalizer.c:131
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:142
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:161
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:282
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1400
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1513
TextureFormatEntry
Definition: ffplay.c:371
AVFilterGraph
Definition: avfilter.h:813
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2624
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:435
fp
#define fp
Definition: regdef.h:44
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:270
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:484
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:448
exp
int8_t exp
Definition: eval.c:74
VideoState::seek_req
int seek_req
Definition: ffplay.c:212
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:261
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:236
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:216
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3349
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:292
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:184
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:791
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: avpacket.c:654
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:971
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:263
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1311
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:337
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:206
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:200
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1854
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:243
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:61
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:360
eval.h
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
Definition: opt.h:298
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2499
Frame::format
int format
Definition: ffplay.c:164
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:121
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:39
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:477
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
OPT_TYPE_INT
@ OPT_TYPE_INT
Definition: cmdutils.h:84
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:462
AVPacket::size
int size
Definition: packet.h:523
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:298
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:681
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:250
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:160
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:214
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:178
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:747
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:144
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
Frame::sub
AVSubtitle sub
Definition: ffplay.c:157
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:302
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:348
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:574
create_hwaccel
static int create_hwaccel(AVBufferRef **device_ctx)
Definition: ffplay.c:2592
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:569
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
sp
#define sp
Definition: regdef.h:63
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
sdl_supported_color_spaces
static enum AVColorSpace sdl_supported_color_spaces[]
Definition: ffplay.c:943
start_time
static int64_t start_time
Definition: ffplay.c:329
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:261
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1057
Frame::serial
int serial
Definition: ffplay.c:158
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:551
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:65
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:271
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2817
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:471
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:662
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:146
rect::h
int h
Definition: f_ebur128.c:78
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:274
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:120
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:275
OPT_TYPE_INT64
@ OPT_TYPE_INT64
Definition: cmdutils.h:85
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:462
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:264
fn
#define fn(a)
Definition: aap_template.c:37
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:320
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:340
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:287
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:164
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:507
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:521
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:257
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1581
a
The reader does not expect b to be semantically unsigned here, and if the code is changed, maybe by adding a division or another operation, the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int; to use the same example: SUINT a
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1519
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:820
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1589
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e.
Definition: channel_layout.h:118
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:608
seek_interval
static float seek_interval
Definition: ffplay.c:322
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:63
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
rect::x
int x
Definition: f_ebur128.c:78
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:214
OPT_TYPE_FUNC
@ OPT_TYPE_FUNC
Definition: cmdutils.h:81
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:779
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:367
OPT_TYPE_BOOL
@ OPT_TYPE_BOOL
Definition: cmdutils.h:82
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:294
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1333
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:800
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:410
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:515
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:344
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:359
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:493
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:830
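A small sketch (an illustrative helper, not part of ffplay) of obtaining and releasing a default layout:

#include "libavutil/channel_layout.h"

static void make_default_stereo(void)
{
    AVChannelLayout layout;
    av_channel_layout_default(&layout, 2);  /* 2 channels -> native stereo layout */
    /* ... use layout ... */
    av_channel_layout_uninit(&layout);      /* release any allocated channel map */
}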
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:535
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:67
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:406
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:68
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:349
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:211
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:674
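A minimal send/receive decoding step, sketched under the assumption that avctx is an opened decoder and frame is a caller-allocated AVFrame (the helper name is illustrative):

#include <errno.h>
#include "libavcodec/avcodec.h"

static int decode_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(avctx, pkt);
    if (ret < 0)
        return ret;                                 /* decoder refused the packet */
    for (;;) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                               /* needs more input or fully drained */
        if (ret < 0)
            return ret;                             /* decoding error */
        /* ... consume frame ... */
        av_frame_unref(frame);
    }
}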
FrameQueue::windex
int windex
Definition: ffplay.c:173
VideoState::filename
char * filename
Definition: ffplay.c:291
VideoState::muted
int muted
Definition: ffplay.c:252
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:197
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:455
bprint.h
Clock::pts
double pts
Definition: ffplay.c:141
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
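An illustrative sketch of handing a hardware frames context to a buffersrc instance before feeding frames (src and hw_frames_ctx are assumed to exist; the helper name is hypothetical):

#include <errno.h>
#include "libavfilter/buffersrc.h"
#include "libavutil/mem.h"

static int set_src_hw_frames(AVFilterContext *src, AVBufferRef *hw_frames_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;
    if (!par)
        return AVERROR(ENOMEM);
    par->hw_frames_ctx = hw_frames_ctx;   /* caller keeps ownership of the reference */
    ret = av_buffersrc_parameters_set(src, par);
    av_free(par);
    return ret;
}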
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:703
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:127
PacketQueue::serial
int serial
Definition: ffplay.c:122
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:515
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:253
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:65
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:246
OPT_TYPE_TIME
@ OPT_TYPE_TIME
Definition: cmdutils.h:88
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:919
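A sketch, under assumed names, of splicing a textual filter description between an existing source and sink filter, in the same spirit as ffplay's configure_filtergraph():

#include <errno.h>
#include "libavfilter/avfilter.h"
#include "libavutil/mem.h"

static int parse_between(AVFilterGraph *graph, const char *filters,
                         AVFilterContext *src, AVFilterContext *sink)
{
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterInOut *outputs = avfilter_inout_alloc();
    int ret;

    if (!inputs || !outputs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    /* "in" is what the parsed chain reads from, "out" is what it feeds. */
    outputs->name = av_strdup("in");  outputs->filter_ctx = src;  outputs->pad_idx = 0; outputs->next = NULL;
    inputs->name  = av_strdup("out"); inputs->filter_ctx  = sink; inputs->pad_idx  = 0; inputs->next  = NULL;

    ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}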
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:60
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1056
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:198
AVCodecParameters::height
int height
Definition: codec_par.h:135
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:436
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:190
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1350
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:891
show_status
static int show_status
Definition: ffplay.c:327
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3568
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:408
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:466
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
vk_renderer_resize
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
Definition: ffplay_renderer.c:827
borderless
static int borderless
Definition: ffplay.c:324
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2296
MyAVPacketList
Definition: ffplay.c:111
OPT_FUNC_ARG
#define OPT_FUNC_ARG
Definition: cmdutils.h:136
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1179
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:1497
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: demux_utils.c:191
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:156
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:470
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
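For example, a sketch of computing the byte size of a packed stereo S16 buffer of 1024 samples (the values and the helper name are chosen only for illustration):

#include "libavutil/samplefmt.h"

static int stereo_s16_buffer_size(void)
{
    /* NULL linesize, 2 channels, 1024 samples, signed 16-bit, no extra alignment */
    return av_samples_get_buffer_size(NULL, 2, 1024, AV_SAMPLE_FMT_S16, 1);
}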
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
vk_renderer
static VkRenderer * vk_renderer
Definition: ffplay.c:369
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:118
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
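For instance, a sketch of converting 90 kHz MPEG ticks to microseconds with rounding to nearest (the helper name is made up):

#include <stdint.h>
#include "libavutil/mathematics.h"

static int64_t ticks90k_to_us(int64_t ts)
{
    return av_rescale(ts, 1000000, 90000);   /* ts * 1000000 / 90000, rounded to nearest */
}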
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:130
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:763
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:465
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:543
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:145
VideoState::height
int height
Definition: ffplay.c:292
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:812
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1407
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:225
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:321
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:70
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:750
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2204
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3148
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:364
frame
These buffered frames must be flushed immediately if a new input produces new output; the filter must not call request_frame to get more. It must just process the frame or queue it; the task of requesting more frames is left to the filter's request_frame method or to the application. If a filter has several inputs, it must be ready for frames arriving randomly on any of them, so any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. For filters that do not use activate, the request_frame method is called when a frame is wanted on an output: it should directly call filter_frame on the corresponding output, or, if there are queued frames already, push one of them; otherwise it should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing, a frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:455
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:295
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:353
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:73
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:347
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:261
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:600
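A sketch, assuming a VAAPI-capable system and an opened decoder context, of creating a device and attaching it (the helper name and the device-type choice are illustrative):

#include "libavcodec/avcodec.h"
#include "libavutil/hwcontext.h"

static int attach_hw_device(AVCodecContext *avctx)
{
    AVBufferRef *dev = NULL;
    int ret = av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
    if (ret < 0)
        return ret;               /* device could not be opened */
    avctx->hw_device_ctx = dev;   /* decoder context takes over this reference */
    return 0;
}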
find_stream_info
static int find_stream_info
Definition: ffplay.c:352
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:421
hwaccel
static const char * hwaccel
Definition: ffplay.c:356
pos
unsigned int pos
Definition: spdifenc.c:413
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:249
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:297
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
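A minimal sketch of creating a self-growing FIFO such as a packet queue might use (the element size is whatever the caller stores; the helper name is hypothetical):

#include "libavutil/fifo.h"

static AVFifo *make_growing_fifo(size_t elem_size)
{
    /* start with room for one element; AV_FIFO_FLAG_AUTO_GROW resizes on writes */
    return av_fifo_alloc2(1, elem_size, AV_FIFO_FLAG_AUTO_GROW);
}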
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:482
VideoState::last_paused
int last_paused
Definition: ffplay.c:210
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:370
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:345
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:113
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3546
enable_vulkan
static int enable_vulkan
Definition: ffplay.c:354
main
int main(int argc, char **argv)
Definition: ffplay.c:3748
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:570
show_usage
static void show_usage(void)
Definition: ffplay.c:3709
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:447
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:123
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:526
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:221
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:342
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:85
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:341
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:124
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2425
PacketQueue::size
int size
Definition: ffplay.c:119
options
static const OptionDef options[]
Definition: ffplay.c:3655
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:567
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:296
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:277
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:207
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:829
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:432
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:248
VideoState::eof
int eof
Definition: ffplay.c:289
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:200
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:83
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:681
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:205
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:251
VideoState::subdec
Decoder subdec
Definition: ffplay.c:230
AVIOContext::eof_reached
int eof_reached
true if was unable to read due to error or eof
Definition: avio.h:238
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2793
samples
In a filter, the word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:299
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:145
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1390
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:524
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:465
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:128
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
input_filename
static const char * input_filename
Definition: ffplay.c:309
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:439
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1500
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:304
vulkan_params
static char * vulkan_params
Definition: ffplay.c:355
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:167
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3292
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:453
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:273
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:481
VideoState::real_data
float * real_data
Definition: ffplay.c:269
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1787
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
default_width
static int default_width
Definition: ffplay.c:311
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1820
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
VideoState::realtime
int realtime
Definition: ffplay.c:218
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:288
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
OPT_TYPE_STRING
@ OPT_TYPE_STRING
Definition: cmdutils.h:83
AVPacket
This structure stores compressed data.
Definition: packet.h:499
audio_disable
static int audio_disable
Definition: ffplay.c:317
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3304
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:555
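A sketch of the reuse pattern, with hypothetical names: the buffer pointer and its recorded size travel together, and reallocation only happens when the requested size exceeds the recorded one:

#include <stdint.h>
#include <errno.h>
#include "libavutil/mem.h"
#include "libavutil/error.h"

static uint8_t     *scratch;
static unsigned int scratch_size;

static int ensure_scratch(size_t needed)
{
    av_fast_malloc(&scratch, &scratch_size, needed);  /* no-op if already large enough */
    return scratch ? 0 : AVERROR(ENOMEM);
}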
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1212
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:279
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:411
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:201
d
d
Definition: ffmpeg_filter.c:410
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:339
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:232
imgutils.h
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:247
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:420
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1380
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:121
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:217
VideoState::viddec
Decoder viddec
Definition: ffplay.c:229
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:792
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:245
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:742
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:272
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1486
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:334
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:255
afilters
static char * afilters
Definition: ffplay.c:350
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1216
int
int
Definition: ffmpeg_filter.c:410
SwsContext
Definition: swscale_internal.h:299
VideoState::audclk
Clock audclk
Definition: ffplay.c:220
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1611
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
print_error
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.h:401
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:180
short
it's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. vf, default, minimum, maximum, flags; name is the option, keep it simple and lowercase; descriptions are short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1061
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:81
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:137
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1471
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
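For example, a sketch of walking every entry of a dictionary (dump_dict is a made-up helper):

#include <stdio.h>
#include "libavutil/dict.h"

static void dump_dict(const AVDictionary *m)
{
    const AVDictionaryEntry *e = NULL;
    while ((e = av_dict_iterate(m, e)))
        printf("%s=%s\n", e->key, e->value);
}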
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2229
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:90
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:94
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:466
duration
static int64_t duration
Definition: ffplay.c:330
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:112
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2800
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
Definition: avutil.h:312
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:117
Frame::height
int height
Definition: ffplay.c:163
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2787
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2882
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:281
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:237
avdevice_register_all
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:70
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:345
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:840
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:467
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:336