FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include "config_components.h"
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/channel_layout.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/mem.h"
37 #include "libavutil/pixdesc.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/fifo.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/time.h"
42 #include "libavutil/bprint.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/tx.h"
48 #include "libswresample/swresample.h"
49 
50 #include "libavfilter/avfilter.h"
51 #include "libavfilter/buffersink.h"
52 #include "libavfilter/buffersrc.h"
53 
54 #include <SDL.h>
55 #include <SDL_thread.h>
56 
57 #include "cmdutils.h"
58 #include "ffplay_renderer.h"
59 #include "opt_common.h"
60 
61 const char program_name[] = "ffplay";
62 const int program_birth_year = 2003;
63 
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_FRAMES 25
66 #define EXTERNAL_CLOCK_MIN_FRAMES 2
67 #define EXTERNAL_CLOCK_MAX_FRAMES 10
68 
69 /* Minimum SDL audio buffer size, in samples. */
70 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
71 /* Calculate actual buffer size, keeping in mind that it should not cause too frequent audio callbacks */
72 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
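
Note: the two macros above feed the SDL buffer-size calculation done later in this file by audio_open(). The standalone sketch below reproduces that arithmetic under the assumption that the requested size is freq / callbacks-per-second rounded down to a power of two, never below the minimum; MIN_BUFFER and MAX_CALLBACKS are stand-ins for the macros and the while loop stands in for av_log2().

#include <stdio.h>

#define MIN_BUFFER    512  /* stand-in for SDL_AUDIO_MIN_BUFFER_SIZE */
#define MAX_CALLBACKS 30   /* stand-in for SDL_AUDIO_MAX_CALLBACKS_PER_SEC */

static int buffer_samples(int freq)
{
    int v = freq / MAX_CALLBACKS, log2v = 0, size;
    while (v >>= 1)          /* floor(log2(v)), like av_log2() */
        log2v++;
    size = 2 << log2v;       /* next power of two not above freq/MAX_CALLBACKS */
    return size > MIN_BUFFER ? size : MIN_BUFFER;
}

int main(void)
{
    printf("48000 Hz -> %d samples per callback\n", buffer_samples(48000)); /* 2048 */
    printf(" 8000 Hz -> %d samples per callback\n", buffer_samples(8000));  /*  512 */
    return 0;
}
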
73 
74 /* Step size for volume control in dB */
75 #define SDL_VOLUME_STEP (0.75)
76 
77 /* no AV sync correction is done if below the minimum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MIN 0.04
79 /* AV sync correction is done if above the maximum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MAX 0.1
81 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
82 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
83 /* no AV correction is done if too big error */
84 #define AV_NOSYNC_THRESHOLD 10.0
85 
86 /* maximum audio speed change to get correct sync */
87 #define SAMPLE_CORRECTION_PERCENT_MAX 10
88 
89 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
90 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
91 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
92 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
93 
94 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
95 #define AUDIO_DIFF_AVG_NB 20
96 
97 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
98 #define REFRESH_RATE 0.01
99 
100 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
101 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
102 #define SAMPLE_ARRAY_SIZE (8 * 65536)
103 
104 #define CURSOR_HIDE_DELAY 1000000
105 
106 #define USE_ONEPASS_SUBTITLE_RENDER 1
107 
108 typedef struct MyAVPacketList {
109  AVPacket *pkt;
110  int serial;
111 } MyAVPacketList;
112 
113 typedef struct PacketQueue {
114  AVFifo *pkt_list;
115  int nb_packets;
116  int size;
117  int64_t duration;
118  int abort_request;
119  int serial;
120  SDL_mutex *mutex;
121  SDL_cond *cond;
122 } PacketQueue;
123 
124 #define VIDEO_PICTURE_QUEUE_SIZE 3
125 #define SUBPICTURE_QUEUE_SIZE 16
126 #define SAMPLE_QUEUE_SIZE 9
127 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
128 
129 typedef struct AudioParams {
130  int freq;
131  AVChannelLayout ch_layout;
132  enum AVSampleFormat fmt;
133  int frame_size;
134  int bytes_per_sec;
135 } AudioParams;
136 
137 typedef struct Clock {
138  double pts; /* clock base */
139  double pts_drift; /* clock base minus time at which we updated the clock */
140  double last_updated;
141  double speed;
142  int serial; /* clock is based on a packet with this serial */
143  int paused;
144  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
145 } Clock;
146 
147 typedef struct FrameData {
148  int64_t pkt_pos;
149 } FrameData;
150 
151 /* Common struct for handling all types of decoded data and allocated render buffers. */
152 typedef struct Frame {
153  AVFrame *frame;
154  AVSubtitle sub;
155  int serial;
156  double pts; /* presentation timestamp for the frame */
157  double duration; /* estimated duration of the frame */
158  int64_t pos; /* byte position of the frame in the input file */
159  int width;
160  int height;
161  int format;
162  AVRational sar;
163  int uploaded;
164  int flip_v;
165 } Frame;
166 
167 typedef struct FrameQueue {
168  Frame queue[FRAME_QUEUE_SIZE];
169  int rindex;
170  int windex;
171  int size;
172  int max_size;
173  int keep_last;
174  int rindex_shown;
175  SDL_mutex *mutex;
176  SDL_cond *cond;
177  PacketQueue *pktq;
178 } FrameQueue;
179 
180 enum {
181  AV_SYNC_AUDIO_MASTER, /* default choice */
182  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
183  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
184 };
185 
186 typedef struct Decoder {
187  AVPacket *pkt;
188  PacketQueue *queue;
189  AVCodecContext *avctx;
190  int pkt_serial;
191  int finished;
192  int packet_pending;
193  SDL_cond *empty_queue_cond;
194  int64_t start_pts;
195  AVRational start_pts_tb;
196  int64_t next_pts;
197  AVRational next_pts_tb;
198  SDL_Thread *decoder_tid;
199 } Decoder;
200 
201 typedef struct VideoState {
202  SDL_Thread *read_tid;
206  int paused;
209  int seek_req;
211  int64_t seek_pos;
212  int64_t seek_rel;
215  int realtime;
216 
220 
224 
228 
230 
232 
233  double audio_clock;
235  double audio_diff_cum; /* used for AV difference average computation */
242  uint8_t *audio_buf;
243  uint8_t *audio_buf1;
244  unsigned int audio_buf_size; /* in bytes */
245  unsigned int audio_buf1_size;
246  int audio_buf_index; /* in bytes */
249  int muted;
256 
257  enum ShowMode {
258  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
259  } show_mode;
266  float *real_data;
268  int xpos;
270  SDL_Texture *vis_texture;
271  SDL_Texture *sub_texture;
272  SDL_Texture *vid_texture;
273 
277 
278  double frame_timer;
284  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
286  int eof;
287 
288  char *filename;
290  int step;
291 
293  AVFilterContext *in_video_filter; // the first filter in the video chain
294  AVFilterContext *out_video_filter; // the last filter in the video chain
295  AVFilterContext *in_audio_filter; // the first filter in the audio chain
296  AVFilterContext *out_audio_filter; // the last filter in the audio chain
297  AVFilterGraph *agraph; // audio filter graph
298 
300 
302 } VideoState;
303 
304 /* options specified by the user */
305 static const AVInputFormat *file_iformat;
306 static const char *input_filename;
307 static const char *window_title;
308 static int default_width = 640;
309 static int default_height = 480;
310 static int screen_width = 0;
311 static int screen_height = 0;
312 static int screen_left = SDL_WINDOWPOS_CENTERED;
313 static int screen_top = SDL_WINDOWPOS_CENTERED;
314 static int audio_disable;
315 static int video_disable;
316 static int subtitle_disable;
317 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
318 static int seek_by_bytes = -1;
319 static float seek_interval = 10;
320 static int display_disable;
321 static int borderless;
322 static int alwaysontop;
323 static int startup_volume = 100;
324 static int show_status = -1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 static const char **vfilters_list = NULL;
346 static int nb_vfilters = 0;
347 static char *afilters = NULL;
348 static int autorotate = 1;
349 static int find_stream_info = 1;
350 static int filter_nbthreads = 0;
351 static int enable_vulkan = 0;
352 static char *vulkan_params = NULL;
353 static const char *hwaccel = NULL;
354 
355 /* current context */
356 static int is_full_screen;
357 static int64_t audio_callback_time;
358 
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 static SDL_RendererInfo renderer_info = {0};
364 static SDL_AudioDeviceID audio_dev;
365 
365 
366 static VkRenderer *vk_renderer;
367 
368 static const struct TextureFormatEntry {
369  enum AVPixelFormat format;
370  SDL_PixelFormatEnum texture_fmt;
371 } sdl_texture_format_map[] = {
372  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
373  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
374  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
375  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
376  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
377  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
378  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
379  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
380  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
381  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
382  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
383  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
384  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
385  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
386  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
387  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
388  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
389  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
390  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
391  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
392 };
393 
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396  int ret = GROW_ARRAY(vfilters_list, nb_vfilters);
397  if (ret < 0)
398  return ret;
399 
400  vfilters_list[nb_vfilters - 1] = av_strdup(arg);
401  if (!vfilters_list[nb_vfilters - 1])
402  return AVERROR(ENOMEM);
403 
404  return 0;
405 }
406 
407 static inline
408 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
409  enum AVSampleFormat fmt2, int64_t channel_count2)
410 {
411  /* If channel count == 1, planar and non-planar formats are the same */
412  if (channel_count1 == 1 && channel_count2 == 1)
413  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
414  else
415  return channel_count1 != channel_count2 || fmt1 != fmt2;
416 }
417 
418 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
419 {
420  MyAVPacketList pkt1;
421  int ret;
422 
423  if (q->abort_request)
424  return -1;
425 
426 
427  pkt1.pkt = pkt;
428  pkt1.serial = q->serial;
429 
430  ret = av_fifo_write(q->pkt_list, &pkt1, 1);
431  if (ret < 0)
432  return ret;
433  q->nb_packets++;
434  q->size += pkt1.pkt->size + sizeof(pkt1);
435  q->duration += pkt1.pkt->duration;
436  /* XXX: should duplicate packet data in DV case */
437  SDL_CondSignal(q->cond);
438  return 0;
439 }
440 
441 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
442 {
443  AVPacket *pkt1;
444  int ret;
445 
446  pkt1 = av_packet_alloc();
447  if (!pkt1) {
448  av_packet_unref(pkt);
449  return -1;
450  }
451  av_packet_move_ref(pkt1, pkt);
452 
453  SDL_LockMutex(q->mutex);
454  ret = packet_queue_put_private(q, pkt1);
455  SDL_UnlockMutex(q->mutex);
456 
457  if (ret < 0)
458  av_packet_free(&pkt1);
459 
460  return ret;
461 }
462 
463 static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
464 {
465  pkt->stream_index = stream_index;
466  return packet_queue_put(q, pkt);
467 }
468 
469 /* packet queue handling */
470 static int packet_queue_init(PacketQueue *q)
471 {
472  memset(q, 0, sizeof(PacketQueue));
473  q->pkt_list = av_fifo_alloc2(1, sizeof(MyAVPacketList), AV_FIFO_FLAG_AUTO_GROW);
474  if (!q->pkt_list)
475  return AVERROR(ENOMEM);
476  q->mutex = SDL_CreateMutex();
477  if (!q->mutex) {
478  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
479  return AVERROR(ENOMEM);
480  }
481  q->cond = SDL_CreateCond();
482  if (!q->cond) {
483  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
484  return AVERROR(ENOMEM);
485  }
486  q->abort_request = 1;
487  return 0;
488 }
489 
490 static void packet_queue_flush(PacketQueue *q)
491 {
492  MyAVPacketList pkt1;
493 
494  SDL_LockMutex(q->mutex);
495  while (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0)
496  av_packet_free(&pkt1.pkt);
497  q->nb_packets = 0;
498  q->size = 0;
499  q->duration = 0;
500  q->serial++;
501  SDL_UnlockMutex(q->mutex);
502 }
503 
504 static void packet_queue_destroy(PacketQueue *q)
505 {
506  packet_queue_flush(q);
507  av_fifo_freep2(&q->pkt_list);
508  SDL_DestroyMutex(q->mutex);
509  SDL_DestroyCond(q->cond);
510 }
511 
512 static void packet_queue_abort(PacketQueue *q)
513 {
514  SDL_LockMutex(q->mutex);
515 
516  q->abort_request = 1;
517 
518  SDL_CondSignal(q->cond);
519 
520  SDL_UnlockMutex(q->mutex);
521 }
522 
523 static void packet_queue_start(PacketQueue *q)
524 {
525  SDL_LockMutex(q->mutex);
526  q->abort_request = 0;
527  q->serial++;
528  SDL_UnlockMutex(q->mutex);
529 }
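
The serial counter bumped by packet_queue_flush() and packet_queue_start() above is how ffplay invalidates data queued before a seek: every packet (and, downstream, every frame and clock) carries the serial of the queue it came from, and consumers drop anything whose serial no longer matches the queue's current one. A minimal standalone illustration of that idea follows; the toy struct is not the real MyAVPacketList, just the serial bookkeeping.

#include <stdio.h>

/* Toy illustration of the queue-serial mechanism: each queued item remembers
 * the serial it was enqueued under; a flush increments the serial, so items
 * from before the flush can be recognized and discarded. */
struct toy_item { int payload, serial; };

int main(void)
{
    int queue_serial = 1;
    struct toy_item items[3] = { {10, 1}, {11, 1}, {0, 0} };

    queue_serial++;                                   /* seek: flush bumps serial to 2 */
    items[2] = (struct toy_item){ 12, queue_serial }; /* queued after the seek */

    for (int i = 0; i < 3; i++) {
        if (items[i].serial == queue_serial)
            printf("keep %d\n", items[i].payload);     /* only 12 survives */
        else
            printf("drop stale %d\n", items[i].payload);
    }
    return 0;
}
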
530 
531 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
532 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
533 {
534  MyAVPacketList pkt1;
535  int ret;
536 
537  SDL_LockMutex(q->mutex);
538 
539  for (;;) {
540  if (q->abort_request) {
541  ret = -1;
542  break;
543  }
544 
545  if (av_fifo_read(q->pkt_list, &pkt1, 1) >= 0) {
546  q->nb_packets--;
547  q->size -= pkt1.pkt->size + sizeof(pkt1);
548  q->duration -= pkt1.pkt->duration;
549  av_packet_move_ref(pkt, pkt1.pkt);
550  if (serial)
551  *serial = pkt1.serial;
552  av_packet_free(&pkt1.pkt);
553  ret = 1;
554  break;
555  } else if (!block) {
556  ret = 0;
557  break;
558  } else {
559  SDL_CondWait(q->cond, q->mutex);
560  }
561  }
562  SDL_UnlockMutex(q->mutex);
563  return ret;
564 }
565 
566 static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
567  memset(d, 0, sizeof(Decoder));
568  d->pkt = av_packet_alloc();
569  if (!d->pkt)
570  return AVERROR(ENOMEM);
571  d->avctx = avctx;
572  d->queue = queue;
573  d->empty_queue_cond = empty_queue_cond;
574  d->start_pts = AV_NOPTS_VALUE;
575  d->pkt_serial = -1;
576  return 0;
577 }
578 
579 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
580  int ret = AVERROR(EAGAIN);
581 
582  for (;;) {
583  if (d->queue->serial == d->pkt_serial) {
584  do {
585  if (d->queue->abort_request)
586  return -1;
587 
588  switch (d->avctx->codec_type) {
589  case AVMEDIA_TYPE_VIDEO:
590  ret = avcodec_receive_frame(d->avctx, frame);
591  if (ret >= 0) {
592  if (decoder_reorder_pts == -1) {
593  frame->pts = frame->best_effort_timestamp;
594  } else if (!decoder_reorder_pts) {
595  frame->pts = frame->pkt_dts;
596  }
597  }
598  break;
599  case AVMEDIA_TYPE_AUDIO:
600  ret = avcodec_receive_frame(d->avctx, frame);
601  if (ret >= 0) {
602  AVRational tb = (AVRational){1, frame->sample_rate};
603  if (frame->pts != AV_NOPTS_VALUE)
604  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
605  else if (d->next_pts != AV_NOPTS_VALUE)
606  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
607  if (frame->pts != AV_NOPTS_VALUE) {
608  d->next_pts = frame->pts + frame->nb_samples;
609  d->next_pts_tb = tb;
610  }
611  }
612  break;
613  }
614  if (ret == AVERROR_EOF) {
615  d->finished = d->pkt_serial;
616  avcodec_flush_buffers(d->avctx);
617  return 0;
618  }
619  if (ret >= 0)
620  return 1;
621  } while (ret != AVERROR(EAGAIN));
622  }
623 
624  do {
625  if (d->queue->nb_packets == 0)
626  SDL_CondSignal(d->empty_queue_cond);
627  if (d->packet_pending) {
628  d->packet_pending = 0;
629  } else {
630  int old_serial = d->pkt_serial;
631  if (packet_queue_get(d->queue, d->pkt, 1, &d->pkt_serial) < 0)
632  return -1;
633  if (old_serial != d->pkt_serial) {
634  avcodec_flush_buffers(d->avctx);
635  d->finished = 0;
636  d->next_pts = d->start_pts;
637  d->next_pts_tb = d->start_pts_tb;
638  }
639  }
640  if (d->queue->serial == d->pkt_serial)
641  break;
642  av_packet_unref(d->pkt);
643  } while (1);
644 
645  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
646  int got_frame = 0;
647  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, d->pkt);
648  if (ret < 0) {
649  ret = AVERROR(EAGAIN);
650  } else {
651  if (got_frame && !d->pkt->data) {
652  d->packet_pending = 1;
653  }
654  ret = got_frame ? 0 : (d->pkt->data ? AVERROR(EAGAIN) : AVERROR_EOF);
655  }
656  av_packet_unref(d->pkt);
657  } else {
658  if (d->pkt->buf && !d->pkt->opaque_ref) {
659  FrameData *fd;
660 
661  d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd));
662  if (!d->pkt->opaque_ref)
663  return AVERROR(ENOMEM);
664  fd = (FrameData*)d->pkt->opaque_ref->data;
665  fd->pkt_pos = d->pkt->pos;
666  }
667 
668  if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) {
669  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
670  d->packet_pending = 1;
671  } else {
672  av_packet_unref(d->pkt);
673  }
674  }
675  }
676 }
677 
678 static void decoder_destroy(Decoder *d) {
679  av_packet_free(&d->pkt);
680  avcodec_free_context(&d->avctx);
681 }
682 
683 static void frame_queue_unref_item(Frame *vp)
684 {
685  av_frame_unref(vp->frame);
686  avsubtitle_free(&vp->sub);
687 }
688 
689 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
690 {
691  int i;
692  memset(f, 0, sizeof(FrameQueue));
693  if (!(f->mutex = SDL_CreateMutex())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  if (!(f->cond = SDL_CreateCond())) {
698  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
699  return AVERROR(ENOMEM);
700  }
701  f->pktq = pktq;
702  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
703  f->keep_last = !!keep_last;
704  for (i = 0; i < f->max_size; i++)
705  if (!(f->queue[i].frame = av_frame_alloc()))
706  return AVERROR(ENOMEM);
707  return 0;
708 }
709 
710 static void frame_queue_destroy(FrameQueue *f)
711 {
712  int i;
713  for (i = 0; i < f->max_size; i++) {
714  Frame *vp = &f->queue[i];
715  frame_queue_unref_item(vp);
716  av_frame_free(&vp->frame);
717  }
718  SDL_DestroyMutex(f->mutex);
719  SDL_DestroyCond(f->cond);
720 }
721 
722 static void frame_queue_signal(FrameQueue *f)
723 {
724  SDL_LockMutex(f->mutex);
725  SDL_CondSignal(f->cond);
726  SDL_UnlockMutex(f->mutex);
727 }
728 
729 static Frame *frame_queue_peek(FrameQueue *f)
730 {
731  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
732 }
733 
734 static Frame *frame_queue_peek_next(FrameQueue *f)
735 {
736  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
737 }
738 
739 static Frame *frame_queue_peek_last(FrameQueue *f)
740 {
741  return &f->queue[f->rindex];
742 }
743 
744 static Frame *frame_queue_peek_writable(FrameQueue *f)
745 {
746  /* wait until we have space to put a new frame */
747  SDL_LockMutex(f->mutex);
748  while (f->size >= f->max_size &&
749  !f->pktq->abort_request) {
750  SDL_CondWait(f->cond, f->mutex);
751  }
752  SDL_UnlockMutex(f->mutex);
753 
754  if (f->pktq->abort_request)
755  return NULL;
756 
757  return &f->queue[f->windex];
758 }
759 
760 static Frame *frame_queue_peek_readable(FrameQueue *f)
761 {
762  /* wait until we have a readable new frame */
763  SDL_LockMutex(f->mutex);
764  while (f->size - f->rindex_shown <= 0 &&
765  !f->pktq->abort_request) {
766  SDL_CondWait(f->cond, f->mutex);
767  }
768  SDL_UnlockMutex(f->mutex);
769 
770  if (f->pktq->abort_request)
771  return NULL;
772 
773  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
774 }
775 
776 static void frame_queue_push(FrameQueue *f)
777 {
778  if (++f->windex == f->max_size)
779  f->windex = 0;
780  SDL_LockMutex(f->mutex);
781  f->size++;
782  SDL_CondSignal(f->cond);
783  SDL_UnlockMutex(f->mutex);
784 }
785 
786 static void frame_queue_next(FrameQueue *f)
787 {
788  if (f->keep_last && !f->rindex_shown) {
789  f->rindex_shown = 1;
790  return;
791  }
792  frame_queue_unref_item(&f->queue[f->rindex]);
793  if (++f->rindex == f->max_size)
794  f->rindex = 0;
795  SDL_LockMutex(f->mutex);
796  f->size--;
797  SDL_CondSignal(f->cond);
798  SDL_UnlockMutex(f->mutex);
799 }
800 
801 /* return the number of undisplayed frames in the queue */
802 static int frame_queue_nb_remaining(FrameQueue *f)
803 {
804  return f->size - f->rindex_shown;
805 }
806 
807 /* return last shown position */
808 static int64_t frame_queue_last_pos(FrameQueue *f)
809 {
810  Frame *fp = &f->queue[f->rindex];
811  if (f->rindex_shown && fp->serial == f->pktq->serial)
812  return fp->pos;
813  else
814  return -1;
815 }
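
The helpers above implement a small ring buffer with a "keep the last shown frame" twist: rindex points at the oldest entry, rindex_shown records whether that entry has already been displayed, readers peek at rindex + rindex_shown, and the number of still-undisplayed frames is size - rindex_shown. A toy standalone version of just that indexing (no locking, no AVFrame) to make the interplay concrete:

#include <stdio.h>

#define MAX 4 /* toy capacity, stands in for FRAME_QUEUE_SIZE */

struct toy_fq { int rindex, windex, size, rindex_shown, keep_last; };

static int nb_remaining(const struct toy_fq *f) { return f->size - f->rindex_shown; }

static void push(struct toy_fq *f) { f->windex = (f->windex + 1) % MAX; f->size++; }

static void next(struct toy_fq *f)
{
    /* same rule as frame_queue_next(): the first advance only marks the
     * current frame as shown, later advances actually drop the oldest one */
    if (f->keep_last && !f->rindex_shown) { f->rindex_shown = 1; return; }
    f->rindex = (f->rindex + 1) % MAX;
    f->size--;
}

int main(void)
{
    struct toy_fq f = { 0, 0, 0, 0, 1 };
    push(&f); push(&f);                           /* two decoded frames queued */
    printf("undisplayed=%d\n", nb_remaining(&f)); /* 2 */
    next(&f);                                     /* frame 0 displayed, kept */
    printf("undisplayed=%d\n", nb_remaining(&f)); /* 1 */
    next(&f);                                     /* frame 0 dropped, frame 1 is now the shown one */
    printf("undisplayed=%d rindex=%d\n", nb_remaining(&f), f.rindex); /* 0, 1 */
    return 0;
}
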
816 
817 static void decoder_abort(Decoder *d, FrameQueue *fq)
818 {
819  packet_queue_abort(d->queue);
820  frame_queue_signal(fq);
821  SDL_WaitThread(d->decoder_tid, NULL);
822  d->decoder_tid = NULL;
823  packet_queue_flush(d->queue);
824 }
825 
826 static inline void fill_rectangle(int x, int y, int w, int h)
827 {
828  SDL_Rect rect;
829  rect.x = x;
830  rect.y = y;
831  rect.w = w;
832  rect.h = h;
833  if (w && h)
834  SDL_RenderFillRect(renderer, &rect);
835 }
836 
837 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
838 {
839  Uint32 format;
840  int access, w, h;
841  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
842  void *pixels;
843  int pitch;
844  if (*texture)
845  SDL_DestroyTexture(*texture);
846  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
847  return -1;
848  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
849  return -1;
850  if (init_texture) {
851  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
852  return -1;
853  memset(pixels, 0, pitch * new_height);
854  SDL_UnlockTexture(*texture);
855  }
856  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
857  }
858  return 0;
859 }
860 
861 static void calculate_display_rect(SDL_Rect *rect,
862  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
863  int pic_width, int pic_height, AVRational pic_sar)
864 {
865  AVRational aspect_ratio = pic_sar;
866  int64_t width, height, x, y;
867 
868  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
869  aspect_ratio = av_make_q(1, 1);
870 
871  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
872 
873  /* XXX: we suppose the screen has a 1.0 pixel ratio */
874  height = scr_height;
875  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
876  if (width > scr_width) {
877  width = scr_width;
878  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
879  }
880  x = (scr_width - width) / 2;
881  y = (scr_height - height) / 2;
882  rect->x = scr_xleft + x;
883  rect->y = scr_ytop + y;
884  rect->w = FFMAX((int)width, 1);
885  rect->h = FFMAX((int)height, 1);
886 }
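
calculate_display_rect() above scales the picture to the largest size that fits the window while preserving its aspect ratio, centers it, and forces even dimensions. The standalone sketch below reproduces that fitting logic with plain integer math; it truncates where av_rescale() rounds to nearest, which is close enough for illustration, and it assumes square pixels the way the caller does when the sample aspect ratio is unset.

#include <stdio.h>

static void fit(int scr_w, int scr_h, int pic_w, int pic_h)
{
    long long h = scr_h;
    long long w = (h * pic_w / pic_h) & ~1LL;   /* fit to height first, make even */
    if (w > scr_w) {                            /* too wide: fit to width instead */
        w = scr_w;
        h = (w * pic_h / pic_w) & ~1LL;
    }
    printf("%dx%d inside %dx%d -> %lldx%lld at (%lld,%lld)\n",
           pic_w, pic_h, scr_w, scr_h, w, h, (scr_w - w) / 2, (scr_h - h) / 2);
}

int main(void)
{
    fit(1280, 720, 1920, 1080); /* same 16:9 aspect: fills the window */
    fit(1280, 720,  640,  480); /* 4:3 source: pillarboxed, 960x720 centered */
    return 0;
}
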
887 
888 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
889 {
890  int i;
891  *sdl_blendmode = SDL_BLENDMODE_NONE;
892  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
893  if (format == AV_PIX_FMT_RGB32 ||
894  format == AV_PIX_FMT_RGB32_1 ||
895  format == AV_PIX_FMT_BGR32 ||
896  format == AV_PIX_FMT_BGR32_1)
897  *sdl_blendmode = SDL_BLENDMODE_BLEND;
898  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
899  if (format == sdl_texture_format_map[i].format) {
900  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
901  return;
902  }
903  }
904 }
905 
906 static int upload_texture(SDL_Texture **tex, AVFrame *frame)
907 {
908  int ret = 0;
909  Uint32 sdl_pix_fmt;
910  SDL_BlendMode sdl_blendmode;
911  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
912  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
913  return -1;
914  switch (sdl_pix_fmt) {
915  case SDL_PIXELFORMAT_IYUV:
916  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
917  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
918  frame->data[1], frame->linesize[1],
919  frame->data[2], frame->linesize[2]);
920  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
921  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
922  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
923  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
924  } else {
925  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
926  return -1;
927  }
928  break;
929  default:
930  if (frame->linesize[0] < 0) {
931  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
932  } else {
933  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
934  }
935  break;
936  }
937  return ret;
938 }
939 
940 static enum AVColorSpace sdl_supported_color_spaces[] = {
941  AVCOL_SPC_BT709,
942  AVCOL_SPC_BT470BG,
943  AVCOL_SPC_SMPTE170M,
944  AVCOL_SPC_UNSPECIFIED,
945 };
946 
947 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
948 {
949 #if SDL_VERSION_ATLEAST(2,0,8)
950  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
951  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
952  if (frame->color_range == AVCOL_RANGE_JPEG)
953  mode = SDL_YUV_CONVERSION_JPEG;
954  else if (frame->colorspace == AVCOL_SPC_BT709)
955  mode = SDL_YUV_CONVERSION_BT709;
956  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
957  mode = SDL_YUV_CONVERSION_BT601;
958  }
959  SDL_SetYUVConversionMode(mode); /* FIXME: no support for linear transfer */
960 #endif
961 }
962 
963 static void video_image_display(VideoState *is)
964 {
965  Frame *vp;
966  Frame *sp = NULL;
967  SDL_Rect rect;
968 
969  vp = frame_queue_peek_last(&is->pictq);
970  if (vk_renderer) {
971  vk_renderer_display(vk_renderer, vp->frame);
972  return;
973  }
974 
975  if (is->subtitle_st) {
976  if (frame_queue_nb_remaining(&is->subpq) > 0) {
977  sp = frame_queue_peek(&is->subpq);
978 
979  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
980  if (!sp->uploaded) {
981  uint8_t* pixels[4];
982  int pitch[4];
983  int i;
984  if (!sp->width || !sp->height) {
985  sp->width = vp->width;
986  sp->height = vp->height;
987  }
988  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
989  return;
990 
991  for (i = 0; i < sp->sub.num_rects; i++) {
992  AVSubtitleRect *sub_rect = sp->sub.rects[i];
993 
994  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
995  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
996  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
997  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
998 
999  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1000  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1001  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1002  0, NULL, NULL, NULL);
1003  if (!is->sub_convert_ctx) {
1004  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1005  return;
1006  }
1007  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1008  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1009  0, sub_rect->h, pixels, pitch);
1010  SDL_UnlockTexture(is->sub_texture);
1011  }
1012  }
1013  sp->uploaded = 1;
1014  }
1015  } else
1016  sp = NULL;
1017  }
1018  }
1019 
1020  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1021  set_sdl_yuv_conversion_mode(vp->frame);
1022 
1023  if (!vp->uploaded) {
1024  if (upload_texture(&is->vid_texture, vp->frame) < 0) {
1025  set_sdl_yuv_conversion_mode(NULL);
1026  return;
1027  }
1028  vp->uploaded = 1;
1029  vp->flip_v = vp->frame->linesize[0] < 0;
1030  }
1031 
1032  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1033  set_sdl_yuv_conversion_mode(NULL);
1034  if (sp) {
1035 #if USE_ONEPASS_SUBTITLE_RENDER
1036  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1037 #else
1038  int i;
1039  double xratio = (double)rect.w / (double)sp->width;
1040  double yratio = (double)rect.h / (double)sp->height;
1041  for (i = 0; i < sp->sub.num_rects; i++) {
1042  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1043  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1044  .y = rect.y + sub_rect->y * yratio,
1045  .w = sub_rect->w * xratio,
1046  .h = sub_rect->h * yratio};
1047  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1048  }
1049 #endif
1050  }
1051 }
1052 
1053 static inline int compute_mod(int a, int b)
1054 {
1055  return a < 0 ? a%b + b : a%b;
1056 }
1057 
1058 static void video_audio_display(VideoState *s)
1059 {
1060  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1061  int ch, channels, h, h2;
1062  int64_t time_diff;
1063  int rdft_bits, nb_freq;
1064 
1065  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1066  ;
1067  nb_freq = 1 << (rdft_bits - 1);
1068 
1069  /* compute display index : center on currently output samples */
1070  channels = s->audio_tgt.ch_layout.nb_channels;
1071  nb_display_channels = channels;
1072  if (!s->paused) {
1073  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1074  n = 2 * channels;
1075  delay = s->audio_write_buf_size;
1076  delay /= n;
1077 
1078  /* to be more precise, we take into account the time spent since
1079  the last buffer computation */
1080  if (audio_callback_time) {
1081  time_diff = av_gettime_relative() - audio_callback_time;
1082  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1083  }
1084 
1085  delay += 2 * data_used;
1086  if (delay < data_used)
1087  delay = data_used;
1088 
1089  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1090  if (s->show_mode == SHOW_MODE_WAVES) {
1091  h = INT_MIN;
1092  for (i = 0; i < 1000; i += channels) {
1093  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1094  int a = s->sample_array[idx];
1095  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1096  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1097  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1098  int score = a - d;
1099  if (h < score && (b ^ c) < 0) {
1100  h = score;
1101  i_start = idx;
1102  }
1103  }
1104  }
1105 
1106  s->last_i_start = i_start;
1107  } else {
1108  i_start = s->last_i_start;
1109  }
1110 
1111  if (s->show_mode == SHOW_MODE_WAVES) {
1112  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1113 
1114  /* total height for one channel */
1115  h = s->height / nb_display_channels;
1116  /* graph height / 2 */
1117  h2 = (h * 9) / 20;
1118  for (ch = 0; ch < nb_display_channels; ch++) {
1119  i = i_start + ch;
1120  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1121  for (x = 0; x < s->width; x++) {
1122  y = (s->sample_array[i] * h2) >> 15;
1123  if (y < 0) {
1124  y = -y;
1125  ys = y1 - y;
1126  } else {
1127  ys = y1;
1128  }
1129  fill_rectangle(s->xleft + x, ys, 1, y);
1130  i += channels;
1131  if (i >= SAMPLE_ARRAY_SIZE)
1132  i -= SAMPLE_ARRAY_SIZE;
1133  }
1134  }
1135 
1136  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1137 
1138  for (ch = 1; ch < nb_display_channels; ch++) {
1139  y = s->ytop + ch * h;
1140  fill_rectangle(s->xleft, y, s->width, 1);
1141  }
1142  } else {
1143  int err = 0;
1144  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1145  return;
1146 
1147  if (s->xpos >= s->width)
1148  s->xpos = 0;
1149  nb_display_channels= FFMIN(nb_display_channels, 2);
1150  if (rdft_bits != s->rdft_bits) {
1151  const float rdft_scale = 1.0;
1152  av_tx_uninit(&s->rdft);
1153  av_freep(&s->real_data);
1154  av_freep(&s->rdft_data);
1155  s->rdft_bits = rdft_bits;
1156  s->real_data = av_malloc_array(nb_freq, 4 *sizeof(*s->real_data));
1157  s->rdft_data = av_malloc_array(nb_freq + 1, 2 *sizeof(*s->rdft_data));
1158  err = av_tx_init(&s->rdft, &s->rdft_fn, AV_TX_FLOAT_RDFT,
1159  0, 1 << rdft_bits, &rdft_scale, 0);
1160  }
1161  if (err < 0 || !s->rdft_data) {
1162  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1163  s->show_mode = SHOW_MODE_WAVES;
1164  } else {
1165  float *data_in[2];
1166  AVComplexFloat *data[2];
1167  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1168  uint32_t *pixels;
1169  int pitch;
1170  for (ch = 0; ch < nb_display_channels; ch++) {
1171  data_in[ch] = s->real_data + 2 * nb_freq * ch;
1172  data[ch] = s->rdft_data + nb_freq * ch;
1173  i = i_start + ch;
1174  for (x = 0; x < 2 * nb_freq; x++) {
1175  double w = (x-nb_freq) * (1.0 / nb_freq);
1176  data_in[ch][x] = s->sample_array[i] * (1.0 - w * w);
1177  i += channels;
1178  if (i >= SAMPLE_ARRAY_SIZE)
1179  i -= SAMPLE_ARRAY_SIZE;
1180  }
1181  s->rdft_fn(s->rdft, data[ch], data_in[ch], sizeof(float));
1182  data[ch][0].im = data[ch][nb_freq].re;
1183  data[ch][nb_freq].re = 0;
1184  }
1185  /* Least efficient way to do this, we should of course
1186  * directly access it but it is more than fast enough. */
1187  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1188  pitch >>= 2;
1189  pixels += pitch * s->height;
1190  for (y = 0; y < s->height; y++) {
1191  double w = 1 / sqrt(nb_freq);
1192  int a = sqrt(w * sqrt(data[0][y].re * data[0][y].re + data[0][y].im * data[0][y].im));
1193  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][y].re, data[1][y].im))
1194  : a;
1195  a = FFMIN(a, 255);
1196  b = FFMIN(b, 255);
1197  pixels -= pitch;
1198  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1199  }
1200  SDL_UnlockTexture(s->vis_texture);
1201  }
1202  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1203  }
1204  if (!s->paused)
1205  s->xpos++;
1206  }
1207 }
1208 
1209 static void stream_component_close(VideoState *is, int stream_index)
1210 {
1211  AVFormatContext *ic = is->ic;
1212  AVCodecParameters *codecpar;
1213 
1214  if (stream_index < 0 || stream_index >= ic->nb_streams)
1215  return;
1216  codecpar = ic->streams[stream_index]->codecpar;
1217 
1218  switch (codecpar->codec_type) {
1219  case AVMEDIA_TYPE_AUDIO:
1220  decoder_abort(&is->auddec, &is->sampq);
1221  SDL_CloseAudioDevice(audio_dev);
1222  decoder_destroy(&is->auddec);
1223  swr_free(&is->swr_ctx);
1224  av_freep(&is->audio_buf1);
1225  is->audio_buf1_size = 0;
1226  is->audio_buf = NULL;
1227 
1228  if (is->rdft) {
1229  av_tx_uninit(&is->rdft);
1230  av_freep(&is->real_data);
1231  av_freep(&is->rdft_data);
1232  is->rdft = NULL;
1233  is->rdft_bits = 0;
1234  }
1235  break;
1236  case AVMEDIA_TYPE_VIDEO:
1237  decoder_abort(&is->viddec, &is->pictq);
1238  decoder_destroy(&is->viddec);
1239  break;
1240  case AVMEDIA_TYPE_SUBTITLE:
1241  decoder_abort(&is->subdec, &is->subpq);
1242  decoder_destroy(&is->subdec);
1243  break;
1244  default:
1245  break;
1246  }
1247 
1248  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1249  switch (codecpar->codec_type) {
1250  case AVMEDIA_TYPE_AUDIO:
1251  is->audio_st = NULL;
1252  is->audio_stream = -1;
1253  break;
1254  case AVMEDIA_TYPE_VIDEO:
1255  is->video_st = NULL;
1256  is->video_stream = -1;
1257  break;
1258  case AVMEDIA_TYPE_SUBTITLE:
1259  is->subtitle_st = NULL;
1260  is->subtitle_stream = -1;
1261  break;
1262  default:
1263  break;
1264  }
1265 }
1266 
1267 static void stream_close(VideoState *is)
1268 {
1269  /* XXX: use a special url_shutdown call to abort parse cleanly */
1270  is->abort_request = 1;
1271  SDL_WaitThread(is->read_tid, NULL);
1272 
1273  /* close each stream */
1274  if (is->audio_stream >= 0)
1275  stream_component_close(is, is->audio_stream);
1276  if (is->video_stream >= 0)
1277  stream_component_close(is, is->video_stream);
1278  if (is->subtitle_stream >= 0)
1279  stream_component_close(is, is->subtitle_stream);
1280 
1281  avformat_close_input(&is->ic);
1282 
1283  packet_queue_destroy(&is->videoq);
1284  packet_queue_destroy(&is->audioq);
1285  packet_queue_destroy(&is->subtitleq);
1286 
1287  /* free all pictures */
1288  frame_queue_destroy(&is->pictq);
1289  frame_queue_destroy(&is->sampq);
1290  frame_queue_destroy(&is->subpq);
1291  SDL_DestroyCond(is->continue_read_thread);
1292  sws_freeContext(is->sub_convert_ctx);
1293  av_free(is->filename);
1294  if (is->vis_texture)
1295  SDL_DestroyTexture(is->vis_texture);
1296  if (is->vid_texture)
1297  SDL_DestroyTexture(is->vid_texture);
1298  if (is->sub_texture)
1299  SDL_DestroyTexture(is->sub_texture);
1300  av_free(is);
1301 }
1302 
1303 static void do_exit(VideoState *is)
1304 {
1305  if (is) {
1306  stream_close(is);
1307  }
1308  if (renderer)
1309  SDL_DestroyRenderer(renderer);
1310  if (vk_renderer)
1311  vk_renderer_destroy(vk_renderer);
1312  if (window)
1313  SDL_DestroyWindow(window);
1314  uninit_opts();
1315  for (int i = 0; i < nb_vfilters; i++)
1316  av_freep(&vfilters_list[i]);
1317  av_freep(&vfilters_list);
1323  if (show_status)
1324  printf("\n");
1325  SDL_Quit();
1326  av_log(NULL, AV_LOG_QUIET, "%s", "");
1327  exit(0);
1328 }
1329 
1330 static void sigterm_handler(int sig)
1331 {
1332  exit(123);
1333 }
1334 
1335 static void set_default_window_size(int width, int height, AVRational sar)
1336 {
1337  SDL_Rect rect;
1338  int max_width = screen_width ? screen_width : INT_MAX;
1339  int max_height = screen_height ? screen_height : INT_MAX;
1340  if (max_width == INT_MAX && max_height == INT_MAX)
1341  max_height = height;
1342  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1343  default_width = rect.w;
1344  default_height = rect.h;
1345 }
1346 
1347 static int video_open(VideoState *is)
1348 {
1349  int w,h;
1350 
1351  w = screen_width ? screen_width : default_width;
1352  h = screen_height ? screen_height : default_height;
1353 
1354  if (!window_title)
1355  window_title = input_filename;
1356  SDL_SetWindowTitle(window, window_title);
1357 
1358  SDL_SetWindowSize(window, w, h);
1359  SDL_SetWindowPosition(window, screen_left, screen_top);
1360  if (is_full_screen)
1361  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1362  SDL_ShowWindow(window);
1363 
1364  is->width = w;
1365  is->height = h;
1366 
1367  return 0;
1368 }
1369 
1370 /* display the current picture, if any */
1371 static void video_display(VideoState *is)
1372 {
1373  if (!is->width)
1374  video_open(is);
1375 
1376  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1377  SDL_RenderClear(renderer);
1378  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1379  video_audio_display(is);
1380  else if (is->video_st)
1381  video_image_display(is);
1382  SDL_RenderPresent(renderer);
1383 }
1384 
1385 static double get_clock(Clock *c)
1386 {
1387  if (*c->queue_serial != c->serial)
1388  return NAN;
1389  if (c->paused) {
1390  return c->pts;
1391  } else {
1392  double time = av_gettime_relative() / 1000000.0;
1393  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1394  }
1395 }
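
get_clock() above reconstructs the running clock value from pts_drift, which set_clock_at() stores as the pts at the last update minus the wall-clock time of that update: the stored pts is simply advanced by the elapsed wall-clock time scaled by the clock speed. A minimal standalone check of that arithmetic, using a toy struct rather than the real Clock:

#include <stdio.h>

/* Toy version of Clock/get_clock(): pts_drift = pts - last_updated, and the
 * running value is pts_drift + now - (now - last_updated) * (1.0 - speed),
 * which is the same as pts + (now - last_updated) * speed. */
struct toy_clock { double pts_drift, last_updated, speed; };

static double toy_get(const struct toy_clock *c, double now)
{
    return c->pts_drift + now - (now - c->last_updated) * (1.0 - c->speed);
}

int main(void)
{
    /* clock set to pts = 10.0 at wall-clock time 100.0, running at 1.05x */
    struct toy_clock c = { 10.0 - 100.0, 100.0, 1.05 };
    printf("%.3f\n", toy_get(&c, 102.0)); /* 10 + 2 * 1.05 = 12.100 */
    return 0;
}
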
1396 
1397 static void set_clock_at(Clock *c, double pts, int serial, double time)
1398 {
1399  c->pts = pts;
1400  c->last_updated = time;
1401  c->pts_drift = c->pts - time;
1402  c->serial = serial;
1403 }
1404 
1405 static void set_clock(Clock *c, double pts, int serial)
1406 {
1407  double time = av_gettime_relative() / 1000000.0;
1408  set_clock_at(c, pts, serial, time);
1409 }
1410 
1411 static void set_clock_speed(Clock *c, double speed)
1412 {
1413  set_clock(c, get_clock(c), c->serial);
1414  c->speed = speed;
1415 }
1416 
1417 static void init_clock(Clock *c, int *queue_serial)
1418 {
1419  c->speed = 1.0;
1420  c->paused = 0;
1421  c->queue_serial = queue_serial;
1422  set_clock(c, NAN, -1);
1423 }
1424 
1425 static void sync_clock_to_slave(Clock *c, Clock *slave)
1426 {
1427  double clock = get_clock(c);
1428  double slave_clock = get_clock(slave);
1429  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1430  set_clock(c, slave_clock, slave->serial);
1431 }
1432 
1433 static int get_master_sync_type(VideoState *is) {
1434  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1435  if (is->video_st)
1436  return AV_SYNC_VIDEO_MASTER;
1437  else
1438  return AV_SYNC_AUDIO_MASTER;
1439  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1440  if (is->audio_st)
1441  return AV_SYNC_AUDIO_MASTER;
1442  else
1443  return AV_SYNC_EXTERNAL_CLOCK;
1444  } else {
1445  return AV_SYNC_EXTERNAL_CLOCK;
1446  }
1447 }
1448 
1449 /* get the current master clock value */
1450 static double get_master_clock(VideoState *is)
1451 {
1452  double val;
1453 
1454  switch (get_master_sync_type(is)) {
1455  case AV_SYNC_VIDEO_MASTER:
1456  val = get_clock(&is->vidclk);
1457  break;
1458  case AV_SYNC_AUDIO_MASTER:
1459  val = get_clock(&is->audclk);
1460  break;
1461  default:
1462  val = get_clock(&is->extclk);
1463  break;
1464  }
1465  return val;
1466 }
1467 
1468 static void check_external_clock_speed(VideoState *is) {
1469  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1470  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1471  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1472  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1473  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1474  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1475  } else {
1476  double speed = is->extclk.speed;
1477  if (speed != 1.0)
1478  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1479  }
1480 }
1481 
1482 /* seek in the stream */
1483 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
1484 {
1485  if (!is->seek_req) {
1486  is->seek_pos = pos;
1487  is->seek_rel = rel;
1488  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1489  if (by_bytes)
1490  is->seek_flags |= AVSEEK_FLAG_BYTE;
1491  is->seek_req = 1;
1492  SDL_CondSignal(is->continue_read_thread);
1493  }
1494 }
1495 
1496 /* pause or resume the video */
1497 static void stream_toggle_pause(VideoState *is)
1498 {
1499  if (is->paused) {
1500  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1501  if (is->read_pause_return != AVERROR(ENOSYS)) {
1502  is->vidclk.paused = 0;
1503  }
1504  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1505  }
1506  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1507  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1508 }
1509 
1510 static void toggle_pause(VideoState *is)
1511 {
1512  stream_toggle_pause(is);
1513  is->step = 0;
1514 }
1515 
1516 static void toggle_mute(VideoState *is)
1517 {
1518  is->muted = !is->muted;
1519 }
1520 
1521 static void update_volume(VideoState *is, int sign, double step)
1522 {
1523  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1524  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1525  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1526 }
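
update_volume() above works in decibels so that every keypress changes perceived loudness by roughly the same amount: convert the linear SDL volume to dB relative to full scale, add or subtract the step, and convert back. A standalone sketch of the same conversion; MIX_MAXVOLUME and VOLUME_STEP here are stand-in constants mirroring SDL_MIX_MAXVOLUME and SDL_VOLUME_STEP, not SDL's own symbols. Build with -lm.

#include <math.h>
#include <stdio.h>

#define MIX_MAXVOLUME 128   /* stand-in for SDL_MIX_MAXVOLUME */
#define VOLUME_STEP   0.75  /* dB, mirrors SDL_VOLUME_STEP */

int main(void)
{
    int volume = 64; /* current linear volume, about -6.02 dB below full scale */
    double level_db = 20.0 * log10(volume / (double)MIX_MAXVOLUME);
    int louder  = lrint(MIX_MAXVOLUME * pow(10.0, (level_db + VOLUME_STEP) / 20.0));
    int quieter = lrint(MIX_MAXVOLUME * pow(10.0, (level_db - VOLUME_STEP) / 20.0));
    printf("%d -> up %d / down %d (step %.2f dB)\n", volume, louder, quieter, VOLUME_STEP);
    return 0;
}
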
1527 
1528 static void step_to_next_frame(VideoState *is)
1529 {
1530  /* if the stream is paused unpause it, then step */
1531  if (is->paused)
1532  stream_toggle_pause(is);
1533  is->step = 1;
1534 }
1535 
1536 static double compute_target_delay(double delay, VideoState *is)
1537 {
1538  double sync_threshold, diff = 0;
1539 
1540  /* update delay to follow master synchronisation source */
1541  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1542  /* if video is slave, we try to correct big delays by
1543  duplicating or deleting a frame */
1544  diff = get_clock(&is->vidclk) - get_master_clock(is);
1545 
1546  /* skip or repeat frame. We take into account the
1547  delay to compute the threshold. I still don't know
1548  if it is the best guess */
1549  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1550  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1551  if (diff <= -sync_threshold)
1552  delay = FFMAX(0, delay + diff);
1553  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1554  delay = delay + diff;
1555  else if (diff >= sync_threshold)
1556  delay = 2 * delay;
1557  }
1558  }
1559 
1560  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1561  delay, -diff);
1562 
1563  return delay;
1564 }
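
compute_target_delay() above is the heart of video-to-master synchronization: if the video clock lags the master by more than a threshold, the next frame is shown sooner (possibly immediately); if it leads, the current frame is held longer or simply shown for twice its duration. Below is a reduced standalone version of the same decision rule, with the two clocks passed in directly and the max_frame_duration and master-sync-type guards left out. Build with -lm.

#include <math.h>
#include <stdio.h>

#define SYNC_MIN 0.04 /* AV_SYNC_THRESHOLD_MIN */
#define SYNC_MAX 0.1  /* AV_SYNC_THRESHOLD_MAX */
#define FRAMEDUP 0.1  /* AV_SYNC_FRAMEDUP_THRESHOLD */

static double target_delay(double delay, double video_clock, double master_clock)
{
    double diff = video_clock - master_clock;
    double sync_threshold = fmax(SYNC_MIN, fmin(SYNC_MAX, delay));

    if (diff <= -sync_threshold)                          /* video lags: shorten delay */
        delay = fmax(0, delay + diff);
    else if (diff >= sync_threshold && delay > FRAMEDUP)  /* long frame: stretch it */
        delay = delay + diff;
    else if (diff >= sync_threshold)                      /* video leads: double delay */
        delay = 2 * delay;
    return delay;
}

int main(void)
{
    printf("%.3f\n", target_delay(0.040, 0.00, 0.20)); /* far behind -> 0.000 (show ASAP) */
    printf("%.3f\n", target_delay(0.040, 0.30, 0.20)); /* ahead      -> 0.080 (repeat)   */
    return 0;
}
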
1565 
1566 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1567  if (vp->serial == nextvp->serial) {
1568  double duration = nextvp->pts - vp->pts;
1569  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1570  return vp->duration;
1571  else
1572  return duration;
1573  } else {
1574  return 0.0;
1575  }
1576 }
1577 
1578 static void update_video_pts(VideoState *is, double pts, int serial)
1579 {
1580  /* update current video pts */
1581  set_clock(&is->vidclk, pts, serial);
1582  sync_clock_to_slave(&is->extclk, &is->vidclk);
1583 }
1584 
1585 /* called to display each frame */
1586 static void video_refresh(void *opaque, double *remaining_time)
1587 {
1588  VideoState *is = opaque;
1589  double time;
1590 
1591  Frame *sp, *sp2;
1592 
1593  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1594  check_external_clock_speed(is);
1595 
1596  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1597  time = av_gettime_relative() / 1000000.0;
1598  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1599  video_display(is);
1600  is->last_vis_time = time;
1601  }
1602  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1603  }
1604 
1605  if (is->video_st) {
1606 retry:
1607  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1608  // nothing to do, no picture to display in the queue
1609  } else {
1610  double last_duration, duration, delay;
1611  Frame *vp, *lastvp;
1612 
1613  /* dequeue the picture */
1614  lastvp = frame_queue_peek_last(&is->pictq);
1615  vp = frame_queue_peek(&is->pictq);
1616 
1617  if (vp->serial != is->videoq.serial) {
1618  frame_queue_next(&is->pictq);
1619  goto retry;
1620  }
1621 
1622  if (lastvp->serial != vp->serial)
1623  is->frame_timer = av_gettime_relative() / 1000000.0;
1624 
1625  if (is->paused)
1626  goto display;
1627 
1628  /* compute nominal last_duration */
1629  last_duration = vp_duration(is, lastvp, vp);
1630  delay = compute_target_delay(last_duration, is);
1631 
1632  time= av_gettime_relative()/1000000.0;
1633  if (time < is->frame_timer + delay) {
1634  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1635  goto display;
1636  }
1637 
1638  is->frame_timer += delay;
1639  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1640  is->frame_timer = time;
1641 
1642  SDL_LockMutex(is->pictq.mutex);
1643  if (!isnan(vp->pts))
1644  update_video_pts(is, vp->pts, vp->serial);
1645  SDL_UnlockMutex(is->pictq.mutex);
1646 
1647  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1648  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1649  duration = vp_duration(is, vp, nextvp);
1650  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1651  is->frame_drops_late++;
1652  frame_queue_next(&is->pictq);
1653  goto retry;
1654  }
1655  }
1656 
1657  if (is->subtitle_st) {
1658  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1659  sp = frame_queue_peek(&is->subpq);
1660 
1661  if (frame_queue_nb_remaining(&is->subpq) > 1)
1662  sp2 = frame_queue_peek_next(&is->subpq);
1663  else
1664  sp2 = NULL;
1665 
1666  if (sp->serial != is->subtitleq.serial
1667  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1668  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1669  {
1670  if (sp->uploaded) {
1671  int i;
1672  for (i = 0; i < sp->sub.num_rects; i++) {
1673  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1674  uint8_t *pixels;
1675  int pitch, j;
1676 
1677  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1678  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1679  memset(pixels, 0, sub_rect->w << 2);
1680  SDL_UnlockTexture(is->sub_texture);
1681  }
1682  }
1683  }
1684  frame_queue_next(&is->subpq);
1685  } else {
1686  break;
1687  }
1688  }
1689  }
1690 
1691  frame_queue_next(&is->pictq);
1692  is->force_refresh = 1;
1693 
1694  if (is->step && !is->paused)
1695  stream_toggle_pause(is);
1696  }
1697 display:
1698  /* display picture */
1699  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1700  video_display(is);
1701  }
1702  is->force_refresh = 0;
1703  if (show_status) {
1704  AVBPrint buf;
1705  static int64_t last_time;
1706  int64_t cur_time;
1707  int aqsize, vqsize, sqsize;
1708  double av_diff;
1709 
1710  cur_time = av_gettime_relative();
1711  if (!last_time || (cur_time - last_time) >= 30000) {
1712  aqsize = 0;
1713  vqsize = 0;
1714  sqsize = 0;
1715  if (is->audio_st)
1716  aqsize = is->audioq.size;
1717  if (is->video_st)
1718  vqsize = is->videoq.size;
1719  if (is->subtitle_st)
1720  sqsize = is->subtitleq.size;
1721  av_diff = 0;
1722  if (is->audio_st && is->video_st)
1723  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1724  else if (is->video_st)
1725  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1726  else if (is->audio_st)
1727  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1728 
1729  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1730  av_bprintf(&buf,
1731  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB \r",
1732  get_master_clock(is),
1733  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
1734  av_diff,
1735  is->frame_drops_early + is->frame_drops_late,
1736  aqsize / 1024,
1737  vqsize / 1024,
1738  sqsize);
1739 
1740  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1741  fprintf(stderr, "%s", buf.str);
1742  else
1743  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1744 
1745  fflush(stderr);
1746  av_bprint_finalize(&buf, NULL);
1747 
1748  last_time = cur_time;
1749  }
1750  }
1751 }
1752 
1753 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1754 {
1755  Frame *vp;
1756 
1757 #if defined(DEBUG_SYNC)
1758  printf("frame_type=%c pts=%0.3f\n",
1759  av_get_picture_type_char(src_frame->pict_type), pts);
1760 #endif
1761 
1762  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1763  return -1;
1764 
1765  vp->sar = src_frame->sample_aspect_ratio;
1766  vp->uploaded = 0;
1767 
1768  vp->width = src_frame->width;
1769  vp->height = src_frame->height;
1770  vp->format = src_frame->format;
1771 
1772  vp->pts = pts;
1773  vp->duration = duration;
1774  vp->pos = pos;
1775  vp->serial = serial;
1776 
1777  set_default_window_size(vp->width, vp->height, vp->sar);
1778 
1779  av_frame_move_ref(vp->frame, src_frame);
1780  frame_queue_push(&is->pictq);
1781  return 0;
1782 }
1783 
1784 static int get_video_frame(VideoState *is, AVFrame *frame)
1785 {
1786  int got_picture;
1787 
1788  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1789  return -1;
1790 
1791  if (got_picture) {
1792  double dpts = NAN;
1793 
1794  if (frame->pts != AV_NOPTS_VALUE)
1795  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1796 
1797  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1798 
1799  if (framedrop > 0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1800  if (frame->pts != AV_NOPTS_VALUE) {
1801  double diff = dpts - get_master_clock(is);
1802  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1803  diff - is->frame_last_filter_delay < 0 &&
1804  is->viddec.pkt_serial == is->vidclk.serial &&
1805  is->videoq.nb_packets) {
1806  is->frame_drops_early++;
1807  av_frame_unref(frame);
1808  got_picture = 0;
1809  }
1810  }
1811  }
1812  }
1813 
1814  return got_picture;
1815 }
1816 
1817 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1818  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1819 {
1820  int ret, i;
1821  int nb_filters = graph->nb_filters;
1822  AVFilterInOut *outputs = NULL, *inputs = NULL;
1823 
1824  if (filtergraph) {
1825  outputs = avfilter_inout_alloc();
1826  inputs = avfilter_inout_alloc();
1827  if (!outputs || !inputs) {
1828  ret = AVERROR(ENOMEM);
1829  goto fail;
1830  }
1831 
1832  outputs->name = av_strdup("in");
1833  outputs->filter_ctx = source_ctx;
1834  outputs->pad_idx = 0;
1835  outputs->next = NULL;
1836 
1837  inputs->name = av_strdup("out");
1838  inputs->filter_ctx = sink_ctx;
1839  inputs->pad_idx = 0;
1840  inputs->next = NULL;
1841 
1842  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1843  goto fail;
1844  } else {
1845  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1846  goto fail;
1847  }
1848 
1849  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1850  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1851  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1852 
1853  ret = avfilter_graph_config(graph, NULL);
1854 fail:
1855  avfilter_inout_free(&outputs);
1856  avfilter_inout_free(&inputs);
1857  return ret;
1858 }
1859 
1860 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1861 {
1862  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1863  char sws_flags_str[512] = "";
1864  char buffersrc_args[256];
1865  int ret;
1866  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1867  AVCodecParameters *codecpar = is->video_st->codecpar;
1868  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1869  const AVDictionaryEntry *e = NULL;
1870  int nb_pix_fmts = 0;
1871  int i, j;
1872  AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
1873 
1874  if (!par)
1875  return AVERROR(ENOMEM);
1876 
1877  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1878  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1879  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1880  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1881  break;
1882  }
1883  }
1884  }
1885  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1886 
1887  while ((e = av_dict_iterate(sws_dict, e))) {
1888  if (!strcmp(e->key, "sws_flags")) {
1889  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1890  } else
1891  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1892  }
1893  if (strlen(sws_flags_str))
1894  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1895 
1896  graph->scale_sws_opts = av_strdup(sws_flags_str);
1897 
1898  snprintf(buffersrc_args, sizeof(buffersrc_args),
1899  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
1900  "colorspace=%d:range=%d",
1901  frame->width, frame->height, frame->format,
1902  is->video_st->time_base.num, is->video_st->time_base.den,
1903  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
1904  frame->colorspace, frame->color_range);
1905  if (fr.num && fr.den)
1906  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1907 
1908  if ((ret = avfilter_graph_create_filter(&filt_src,
1909  avfilter_get_by_name("buffer"),
1910  "ffplay_buffer", buffersrc_args, NULL,
1911  graph)) < 0)
1912  goto fail;
1913  par->hw_frames_ctx = frame->hw_frames_ctx;
1914  ret = av_buffersrc_parameters_set(filt_src, par);
1915  if (ret < 0)
1916  goto fail;
1917 
1918  ret = avfilter_graph_create_filter(&filt_out,
1919  avfilter_get_by_name("buffersink"),
1920  "ffplay_buffersink", NULL, NULL, graph);
1921  if (ret < 0)
1922  goto fail;
1923 
1924  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1925  goto fail;
1926  if (!vk_renderer &&
1927  (ret = av_opt_set_int_list(filt_out, "color_spaces", sdl_supported_color_spaces, AVCOL_SPC_UNSPECIFIED, AV_OPT_SEARCH_CHILDREN)) < 0)
1928  goto fail;
1929 
1930  last_filter = filt_out;
1931 
1932 /* Note: this macro adds a filter before the lastly added filter, so the
1933  * processing order of the filters is in reverse */
1934 #define INSERT_FILT(name, arg) do { \
1935  AVFilterContext *filt_ctx; \
1936  \
1937  ret = avfilter_graph_create_filter(&filt_ctx, \
1938  avfilter_get_by_name(name), \
1939  "ffplay_" name, arg, NULL, graph); \
1940  if (ret < 0) \
1941  goto fail; \
1942  \
1943  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1944  if (ret < 0) \
1945  goto fail; \
1946  \
1947  last_filter = filt_ctx; \
1948 } while (0)
1949 
1950  if (autorotate) {
1951  double theta = 0.0;
1952  int32_t *displaymatrix = NULL;
1953  AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX);
1954  if (sd)
1955  displaymatrix = (int32_t *)sd->data;
1956  if (!displaymatrix) {
1957  const AVPacketSideData *psd = av_packet_side_data_get(is->video_st->codecpar->coded_side_data,
1958  is->video_st->codecpar->nb_coded_side_data,
1959  AV_PKT_DATA_DISPLAYMATRIX);
1960  if (psd)
1961  displaymatrix = (int32_t *)psd->data;
1962  }
1963  theta = get_rotation(displaymatrix);
1964 
1965  if (fabs(theta - 90) < 1.0) {
1966  INSERT_FILT("transpose", "clock");
1967  } else if (fabs(theta - 180) < 1.0) {
1968  INSERT_FILT("hflip", NULL);
1969  INSERT_FILT("vflip", NULL);
1970  } else if (fabs(theta - 270) < 1.0) {
1971  INSERT_FILT("transpose", "cclock");
1972  } else if (fabs(theta) > 1.0) {
1973  char rotate_buf[64];
1974  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1975  INSERT_FILT("rotate", rotate_buf);
1976  }
1977  }
1978 
1979  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1980  goto fail;
1981 
1982  is->in_video_filter = filt_src;
1983  is->out_video_filter = filt_out;
1984 
1985 fail:
1986  av_freep(&par);
1987  return ret;
1988 }
1989 
1990 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1991 {
1992  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1993  int sample_rates[2] = { 0, -1 };
1994  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1995  char aresample_swr_opts[512] = "";
1996  const AVDictionaryEntry *e = NULL;
1997  AVBPrint bp;
1998  char asrc_args[256];
1999  int ret;
2000 
2001  avfilter_graph_free(&is->agraph);
2002  if (!(is->agraph = avfilter_graph_alloc()))
2003  return AVERROR(ENOMEM);
2004  is->agraph->nb_threads = filter_nbthreads;
2005 
2006  av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
2007 
2008  while ((e = av_dict_iterate(swr_opts, e)))
2009  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2010  if (strlen(aresample_swr_opts))
2011  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2012  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2013 
2014  av_channel_layout_describe_bprint(&is->audio_filter_src.ch_layout, &bp);
2015 
2016  ret = snprintf(asrc_args, sizeof(asrc_args),
2017  "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:channel_layout=%s",
2018  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2019  1, is->audio_filter_src.freq, bp.str);
2020 
2021  ret = avfilter_graph_create_filter(&filt_asrc,
2022  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2023  asrc_args, NULL, is->agraph);
2024  if (ret < 0)
2025  goto end;
2026 
2027 
2028  ret = avfilter_graph_create_filter(&filt_asink,
2029  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2030  NULL, NULL, is->agraph);
2031  if (ret < 0)
2032  goto end;
2033 
2034  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2035  goto end;
2036  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2037  goto end;
2038 
2039  if (force_output_format) {
2040  av_bprint_clear(&bp);
2041  av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
2042  sample_rates [0] = is->audio_tgt.freq;
2043  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2044  goto end;
2045  if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
2046  goto end;
2047  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2048  goto end;
2049  }
2050 
2051 
2052  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2053  goto end;
2054 
2055  is->in_audio_filter = filt_asrc;
2056  is->out_audio_filter = filt_asink;
2057 
2058 end:
2059  if (ret < 0)
2060  avfilter_graph_free(&is->agraph);
2061  av_bprint_finalize(&bp, NULL);
2062 
2063  return ret;
2064 }
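/* configure_audio_filters() is first called with force_output_format=0 (from
 * stream_component_open()), letting the sink negotiate freely so audio_open() can pick a
 * device format; once the real frame parameters are known, audio_thread() reconfigures
 * the graph with force_output_format=1 so the sink outputs exactly is->audio_tgt. */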
2065 
2066 static int audio_thread(void *arg)
2067 {
2068  VideoState *is = arg;
2069  AVFrame *frame = av_frame_alloc();
2070  Frame *af;
2071  int last_serial = -1;
2072  int reconfigure;
2073  int got_frame = 0;
2074  AVRational tb;
2075  int ret = 0;
2076 
2077  if (!frame)
2078  return AVERROR(ENOMEM);
2079 
2080  do {
2081  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2082  goto the_end;
2083 
2084  if (got_frame) {
2085  tb = (AVRational){1, frame->sample_rate};
2086 
2087  reconfigure =
2088  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
2089  frame->format, frame->ch_layout.nb_channels) ||
2090  av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2091  is->audio_filter_src.freq != frame->sample_rate ||
2092  is->auddec.pkt_serial != last_serial;
2093 
2094  if (reconfigure) {
2095  char buf1[1024], buf2[1024];
2096  av_channel_layout_describe(&is->audio_filter_src.ch_layout, buf1, sizeof(buf1));
2097  av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2098  av_log(NULL, AV_LOG_DEBUG,
2099  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2100  is->audio_filter_src.freq, is->audio_filter_src.ch_layout.nb_channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2101  frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2102 
2103  is->audio_filter_src.fmt = frame->format;
2104  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2105  if (ret < 0)
2106  goto the_end;
2107  is->audio_filter_src.freq = frame->sample_rate;
2108  last_serial = is->auddec.pkt_serial;
2109 
2110  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2111  goto the_end;
2112  }
2113 
2114  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2115  goto the_end;
2116 
2117  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2118  FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2119  tb = av_buffersink_get_time_base(is->out_audio_filter);
2120  if (!(af = frame_queue_peek_writable(&is->sampq)))
2121  goto the_end;
2122 
2123  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2124  af->pos = fd ? fd->pkt_pos : -1;
2125  af->serial = is->auddec.pkt_serial;
2126  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2127 
2128  av_frame_move_ref(af->frame, frame);
2129  frame_queue_push(&is->sampq);
2130 
2131  if (is->audioq.serial != is->auddec.pkt_serial)
2132  break;
2133  }
2134  if (ret == AVERROR_EOF)
2135  is->auddec.finished = is->auddec.pkt_serial;
2136  }
2137  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2138  the_end:
2139  avfilter_graph_free(&is->agraph);
2140  av_frame_free(&frame);
2141  return ret;
2142 }
2143 
2144 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2145 {
2146  packet_queue_start(d->queue);
2147  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2148  if (!d->decoder_tid) {
2149  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2150  return AVERROR(ENOMEM);
2151  }
2152  return 0;
2153 }
2154 
2155 static int video_thread(void *arg)
2156 {
2157  VideoState *is = arg;
2158  AVFrame *frame = av_frame_alloc();
2159  double pts;
2160  double duration;
2161  int ret;
2162  AVRational tb = is->video_st->time_base;
2163  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2164 
2165  AVFilterGraph *graph = NULL;
2166  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2167  int last_w = 0;
2168  int last_h = 0;
2169  enum AVPixelFormat last_format = -2;
2170  int last_serial = -1;
2171  int last_vfilter_idx = 0;
2172 
2173  if (!frame)
2174  return AVERROR(ENOMEM);
2175 
2176  for (;;) {
2177  ret = get_video_frame(is, frame);
2178  if (ret < 0)
2179  goto the_end;
2180  if (!ret)
2181  continue;
2182 
2183  if ( last_w != frame->width
2184  || last_h != frame->height
2185  || last_format != frame->format
2186  || last_serial != is->viddec.pkt_serial
2187  || last_vfilter_idx != is->vfilter_idx) {
2188  av_log(NULL, AV_LOG_DEBUG,
2189  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2190  last_w, last_h,
2191  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2192  frame->width, frame->height,
2193  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2194  avfilter_graph_free(&graph);
2195  graph = avfilter_graph_alloc();
2196  if (!graph) {
2197  ret = AVERROR(ENOMEM);
2198  goto the_end;
2199  }
2200  graph->nb_threads = filter_nbthreads;
2201  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2202  SDL_Event event;
2203  event.type = FF_QUIT_EVENT;
2204  event.user.data1 = is;
2205  SDL_PushEvent(&event);
2206  goto the_end;
2207  }
2208  filt_in = is->in_video_filter;
2209  filt_out = is->out_video_filter;
2210  last_w = frame->width;
2211  last_h = frame->height;
2212  last_format = frame->format;
2213  last_serial = is->viddec.pkt_serial;
2214  last_vfilter_idx = is->vfilter_idx;
2215  frame_rate = av_buffersink_get_frame_rate(filt_out);
2216  }
2217 
2218  ret = av_buffersrc_add_frame(filt_in, frame);
2219  if (ret < 0)
2220  goto the_end;
2221 
2222  while (ret >= 0) {
2223  FrameData *fd;
2224 
2225  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2226 
2227  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2228  if (ret < 0) {
2229  if (ret == AVERROR_EOF)
2230  is->viddec.finished = is->viddec.pkt_serial;
2231  ret = 0;
2232  break;
2233  }
2234 
2235  fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
2236 
2237  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2238  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2239  is->frame_last_filter_delay = 0;
2240  tb = av_buffersink_get_time_base(filt_out);
2241  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2242  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2243  ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
2244  av_frame_unref(frame);
2245  if (is->videoq.serial != is->viddec.pkt_serial)
2246  break;
2247  }
2248 
2249  if (ret < 0)
2250  goto the_end;
2251  }
2252  the_end:
2253  avfilter_graph_free(&graph);
2254  av_frame_free(&frame);
2255  return 0;
2256 }
2257 
2258 static int subtitle_thread(void *arg)
2259 {
2260  VideoState *is = arg;
2261  Frame *sp;
2262  int got_subtitle;
2263  double pts;
2264 
2265  for (;;) {
2266  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2267  return 0;
2268 
2269  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2270  break;
2271 
2272  pts = 0;
2273 
2274  if (got_subtitle && sp->sub.format == 0) {
2275  if (sp->sub.pts != AV_NOPTS_VALUE)
2276  pts = sp->sub.pts / (double)AV_TIME_BASE;
2277  sp->pts = pts;
2278  sp->serial = is->subdec.pkt_serial;
2279  sp->width = is->subdec.avctx->width;
2280  sp->height = is->subdec.avctx->height;
2281  sp->uploaded = 0;
2282 
2283  /* now we can update the subtitle queue */
2284  frame_queue_push(&is->subpq);
2285  } else if (got_subtitle) {
2286  avsubtitle_free(&sp->sub);
2287  }
2288  }
2289  return 0;
2290 }
2291 
2292 /* copy samples into the circular buffer used for the waveform/RDFT display */
2293 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2294 {
2295  int size, len;
2296 
2297  size = samples_size / sizeof(short);
2298  while (size > 0) {
2299  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2300  if (len > size)
2301  len = size;
2302  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2303  samples += len;
2304  is->sample_array_index += len;
2305  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2306  is->sample_array_index = 0;
2307  size -= len;
2308  }
2309 }
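/* sample_array is treated as a circular buffer of SAMPLE_ARRAY_SIZE int16 samples:
 * the copy wraps sample_array_index back to 0 when it reaches the end, so the display
 * code always sees the most recent samples. */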
2310 
2311 /* return the wanted number of samples to get better sync if sync_type is video
2312  * or external master clock */
2313 static int synchronize_audio(VideoState *is, int nb_samples)
2314 {
2315  int wanted_nb_samples = nb_samples;
2316 
2317  /* if not master, then we try to remove or add samples to correct the clock */
2318  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2319  double diff, avg_diff;
2320  int min_nb_samples, max_nb_samples;
2321 
2322  diff = get_clock(&is->audclk) - get_master_clock(is);
2323 
2324  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2325  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2326  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2327  /* not enough measures to have a correct estimate */
2328  is->audio_diff_avg_count++;
2329  } else {
2330  /* estimate the A-V difference */
2331  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2332 
2333  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2334  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2335  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2336  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2337  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2338  }
2339  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2340  diff, avg_diff, wanted_nb_samples - nb_samples,
2341  is->audio_clock, is->audio_diff_threshold);
2342  }
2343  } else {
2344  /* the difference is too large: probably initial PTS errors, so
2345  reset the A-V filter */
2346  is->audio_diff_avg_count = 0;
2347  is->audio_diff_cum = 0;
2348  }
2349  }
2350 
2351  return wanted_nb_samples;
2352 }
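/* Rough example of the clamping above: at 48 kHz with nb_samples = 1024 and a measured
 * A-V offset of +10 ms, the raw request would be 1024 + 0.010*48000 = 1504 samples, but
 * with SAMPLE_CORRECTION_PERCENT_MAX = 10 it is clipped to the [921, 1126] range, so the
 * correction is spread over several callbacks instead of being applied all at once. */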
2353 
2354 /**
2355  * Decode one audio frame and return its uncompressed size.
2356  *
2357  * The processed audio frame is decoded, converted if required, and
2358  * stored in is->audio_buf, with size in bytes given by the return
2359  * value.
2360  */
2361 static int audio_decode_frame(VideoState *is)
2362 {
2363  int data_size, resampled_data_size;
2364  av_unused double audio_clock0;
2365  int wanted_nb_samples;
2366  Frame *af;
2367 
2368  if (is->paused)
2369  return -1;
2370 
2371  do {
2372 #if defined(_WIN32)
2373  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2374  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2375  return -1;
2376  av_usleep (1000);
2377  }
2378 #endif
2379  if (!(af = frame_queue_peek_readable(&is->sampq)))
2380  return -1;
2381  frame_queue_next(&is->sampq);
2382  } while (af->serial != is->audioq.serial);
2383 
2384  data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2385  af->frame->nb_samples,
2386  af->frame->format, 1);
2387 
2388  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2389 
2390  if (af->frame->format != is->audio_src.fmt ||
2391  av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2392  af->frame->sample_rate != is->audio_src.freq ||
2393  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2394  swr_free(&is->swr_ctx);
2395  swr_alloc_set_opts2(&is->swr_ctx,
2396  &is->audio_tgt.ch_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2397  &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2398  0, NULL);
2399  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2400  av_log(NULL, AV_LOG_ERROR,
2401  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2402  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2403  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.ch_layout.nb_channels);
2404  swr_free(&is->swr_ctx);
2405  return -1;
2406  }
2407  if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2408  return -1;
2409  is->audio_src.freq = af->frame->sample_rate;
2410  is->audio_src.fmt = af->frame->format;
2411  }
2412 
2413  if (is->swr_ctx) {
2414  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2415  uint8_t **out = &is->audio_buf1;
2416  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2417  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.ch_layout.nb_channels, out_count, is->audio_tgt.fmt, 0);
2418  int len2;
2419  if (out_size < 0) {
2420  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2421  return -1;
2422  }
2423  if (wanted_nb_samples != af->frame->nb_samples) {
2424  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2425  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2426  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2427  return -1;
2428  }
2429  }
2430  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2431  if (!is->audio_buf1)
2432  return AVERROR(ENOMEM);
2433  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2434  if (len2 < 0) {
2435  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2436  return -1;
2437  }
2438  if (len2 == out_count) {
2439  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2440  if (swr_init(is->swr_ctx) < 0)
2441  swr_free(&is->swr_ctx);
2442  }
2443  is->audio_buf = is->audio_buf1;
2444  resampled_data_size = len2 * is->audio_tgt.ch_layout.nb_channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2445  } else {
2446  is->audio_buf = af->frame->data[0];
2447  resampled_data_size = data_size;
2448  }
2449 
2450  audio_clock0 = is->audio_clock;
2451  /* update the audio clock with the pts */
2452  if (!isnan(af->pts))
2453  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2454  else
2455  is->audio_clock = NAN;
2456  is->audio_clock_serial = af->serial;
2457 #ifdef DEBUG
2458  {
2459  static double last_clock;
2460  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2461  is->audio_clock - last_clock,
2462  is->audio_clock, audio_clock0);
2463  last_clock = is->audio_clock;
2464  }
2465 #endif
2466  return resampled_data_size;
2467 }
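/* Note on the resampling path above: out_count adds 256 samples of headroom on top of the
 * rate-converted size so swr_convert() has room for samples buffered inside the resampler
 * and for compensation; if len2 ever equals out_count the buffer was probably still too
 * small, which is what the "audio buffer is probably too small" warning reports. */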
2468 
2469 /* prepare a new audio buffer */
2470 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2471 {
2472  VideoState *is = opaque;
2473  int audio_size, len1;
2474 
2475  audio_callback_time = av_gettime_relative();
2476 
2477  while (len > 0) {
2478  if (is->audio_buf_index >= is->audio_buf_size) {
2479  audio_size = audio_decode_frame(is);
2480  if (audio_size < 0) {
2481  /* if error, just output silence */
2482  is->audio_buf = NULL;
2483  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2484  } else {
2485  if (is->show_mode != SHOW_MODE_VIDEO)
2486  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2487  is->audio_buf_size = audio_size;
2488  }
2489  is->audio_buf_index = 0;
2490  }
2491  len1 = is->audio_buf_size - is->audio_buf_index;
2492  if (len1 > len)
2493  len1 = len;
2494  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2495  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2496  else {
2497  memset(stream, 0, len1);
2498  if (!is->muted && is->audio_buf)
2499  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2500  }
2501  len -= len1;
2502  stream += len1;
2503  is->audio_buf_index += len1;
2504  }
2505  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2506  /* Let's assume the audio driver that is used by SDL has two periods. */
2507  if (!isnan(is->audio_clock)) {
2508  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2509  sync_clock_to_slave(&is->extclk, &is->audclk);
2510  }
2511 }
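/* The clock set above is the pts of the last decoded sample minus the audio still queued:
 * two hardware periods (per the assumption above that the SDL driver double-buffers) plus
 * the part of audio_buf not yet copied out. E.g. with 48 kHz stereo s16
 * (bytes_per_sec = 192000) and audio_hw_buf_size = 8192, the two periods alone account
 * for roughly 85 ms of latency. */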
2512 
2513 static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2514 {
2515  SDL_AudioSpec wanted_spec, spec;
2516  const char *env;
2517  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2518  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2519  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2520  int wanted_nb_channels = wanted_channel_layout->nb_channels;
2521 
2522  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2523  if (env) {
2524  wanted_nb_channels = atoi(env);
2525  av_channel_layout_uninit(wanted_channel_layout);
2526  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2527  }
2528  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2529  av_channel_layout_uninit(wanted_channel_layout);
2530  av_channel_layout_default(wanted_channel_layout, wanted_nb_channels);
2531  }
2532  wanted_nb_channels = wanted_channel_layout->nb_channels;
2533  wanted_spec.channels = wanted_nb_channels;
2534  wanted_spec.freq = wanted_sample_rate;
2535  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2536  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2537  return -1;
2538  }
2539  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2540  next_sample_rate_idx--;
2541  wanted_spec.format = AUDIO_S16SYS;
2542  wanted_spec.silence = 0;
2543  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2544  wanted_spec.callback = sdl_audio_callback;
2545  wanted_spec.userdata = opaque;
2546  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2547  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2548  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2549  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2550  if (!wanted_spec.channels) {
2551  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2552  wanted_spec.channels = wanted_nb_channels;
2553  if (!wanted_spec.freq) {
2554  av_log(NULL, AV_LOG_ERROR,
2555  "No more combinations to try, audio open failed\n");
2556  return -1;
2557  }
2558  }
2559  av_channel_layout_default(wanted_channel_layout, wanted_spec.channels);
2560  }
2561  if (spec.format != AUDIO_S16SYS) {
2562  av_log(NULL, AV_LOG_ERROR,
2563  "SDL advised audio format %d is not supported!\n", spec.format);
2564  return -1;
2565  }
2566  if (spec.channels != wanted_spec.channels) {
2567  av_channel_layout_uninit(wanted_channel_layout);
2568  av_channel_layout_default(wanted_channel_layout, spec.channels);
2569  if (wanted_channel_layout->order != AV_CHANNEL_ORDER_NATIVE) {
2570  av_log(NULL, AV_LOG_ERROR,
2571  "SDL advised channel count %d is not supported!\n", spec.channels);
2572  return -1;
2573  }
2574  }
2575 
2576  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2577  audio_hw_params->freq = spec.freq;
2578  if (av_channel_layout_copy(&audio_hw_params->ch_layout, wanted_channel_layout) < 0)
2579  return -1;
2580  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, 1, audio_hw_params->fmt, 1);
2581  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->ch_layout.nb_channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2582  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2583  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2584  return -1;
2585  }
2586  return spec.size;
2587 }
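/* About the wanted_spec.samples computation above: 2 << av_log2(x) is the power of two
 * just above x, so for a 48000 Hz device and SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 30 the
 * request is 2 << av_log2(1600) = 2048 samples, i.e. roughly 43 ms per callback
 * (about 23 callbacks per second), and never below SDL_AUDIO_MIN_BUFFER_SIZE. */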
2588 
2589 static int create_hwaccel(AVBufferRef **device_ctx)
2590 {
2591  enum AVHWDeviceType type;
2592  int ret;
2593  AVBufferRef *vk_dev;
2594 
2595  *device_ctx = NULL;
2596 
2597  if (!hwaccel)
2598  return 0;
2599 
2600  type = av_hwdevice_find_type_by_name(hwaccel);
2601  if (type == AV_HWDEVICE_TYPE_NONE)
2602  return AVERROR(ENOTSUP);
2603 
2604  ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
2605  if (ret < 0)
2606  return ret;
2607 
2608  ret = av_hwdevice_ctx_create_derived(device_ctx, type, vk_dev, 0);
2609  if (!ret)
2610  return 0;
2611 
2612  if (ret != AVERROR(ENOSYS))
2613  return ret;
2614 
2615  av_log(NULL, AV_LOG_WARNING, "Derive %s from vulkan not supported.\n", hwaccel);
2616  ret = av_hwdevice_ctx_create(device_ctx, type, NULL, NULL, 0);
2617  return ret;
2618 }
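/* The hwaccel device is preferably derived from the Vulkan renderer's own device, so the
 * decoder and the renderer can share frames; only when that derivation is unsupported
 * (AVERROR(ENOSYS)) does the code fall back to creating a standalone device of the
 * requested type. */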
2619 
2620 /* open a given stream. Return 0 if OK */
2621 static int stream_component_open(VideoState *is, int stream_index)
2622 {
2623  AVFormatContext *ic = is->ic;
2624  AVCodecContext *avctx;
2625  const AVCodec *codec;
2626  const char *forced_codec_name = NULL;
2627  AVDictionary *opts = NULL;
2628  const AVDictionaryEntry *t = NULL;
2629  int sample_rate;
2630  AVChannelLayout ch_layout = { 0 };
2631  int ret = 0;
2632  int stream_lowres = lowres;
2633 
2634  if (stream_index < 0 || stream_index >= ic->nb_streams)
2635  return -1;
2636 
2637  avctx = avcodec_alloc_context3(NULL);
2638  if (!avctx)
2639  return AVERROR(ENOMEM);
2640 
2641  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2642  if (ret < 0)
2643  goto fail;
2644  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2645 
2646  codec = avcodec_find_decoder(avctx->codec_id);
2647 
2648  switch(avctx->codec_type){
2649  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2650  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2651  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2652  }
2653  if (forced_codec_name)
2654  codec = avcodec_find_decoder_by_name(forced_codec_name);
2655  if (!codec) {
2656  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2657  "No codec could be found with name '%s'\n", forced_codec_name);
2658  else av_log(NULL, AV_LOG_WARNING,
2659  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2660  ret = AVERROR(EINVAL);
2661  goto fail;
2662  }
2663 
2664  avctx->codec_id = codec->id;
2665  if (stream_lowres > codec->max_lowres) {
2666  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2667  codec->max_lowres);
2668  stream_lowres = codec->max_lowres;
2669  }
2670  avctx->lowres = stream_lowres;
2671 
2672  if (fast)
2673  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2674 
2675  ret = filter_codec_opts(codec_opts, avctx->codec_id, ic,
2676  ic->streams[stream_index], codec, &opts);
2677  if (ret < 0)
2678  goto fail;
2679 
2680  if (!av_dict_get(opts, "threads", NULL, 0))
2681  av_dict_set(&opts, "threads", "auto", 0);
2682  if (stream_lowres)
2683  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2684 
2685  av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY);
2686 
2687  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2688  ret = create_hwaccel(&avctx->hw_device_ctx);
2689  if (ret < 0)
2690  goto fail;
2691  }
2692 
2693  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2694  goto fail;
2695  }
2696  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2697  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2698  ret = AVERROR_OPTION_NOT_FOUND;
2699  goto fail;
2700  }
2701 
2702  is->eof = 0;
2703  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2704  switch (avctx->codec_type) {
2705  case AVMEDIA_TYPE_AUDIO:
2706  {
2707  AVFilterContext *sink;
2708 
2709  is->audio_filter_src.freq = avctx->sample_rate;
2710  ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &avctx->ch_layout);
2711  if (ret < 0)
2712  goto fail;
2713  is->audio_filter_src.fmt = avctx->sample_fmt;
2714  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2715  goto fail;
2716  sink = is->out_audio_filter;
2717  sample_rate = av_buffersink_get_sample_rate(sink);
2718  ret = av_buffersink_get_ch_layout(sink, &ch_layout);
2719  if (ret < 0)
2720  goto fail;
2721  }
2722 
2723  /* prepare audio output */
2724  if ((ret = audio_open(is, &ch_layout, sample_rate, &is->audio_tgt)) < 0)
2725  goto fail;
2726  is->audio_hw_buf_size = ret;
2727  is->audio_src = is->audio_tgt;
2728  is->audio_buf_size = 0;
2729  is->audio_buf_index = 0;
2730 
2731  /* init averaging filter */
2732  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2733  is->audio_diff_avg_count = 0;
2734  /* since we do not have a precise enough audio FIFO fullness,
2735  we correct audio sync only if larger than this threshold */
2736  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
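 /* audio_diff_avg_coef equals 0.01^(1/AUDIO_DIFF_AVG_NB), about 0.794 for 20 samples, so
  * audio_diff_cum is an exponentially weighted sum in which the oldest of the last
  * AUDIO_DIFF_AVG_NB differences keeps only about 1% of its weight; multiplying by
  * (1 - coef) in synchronize_audio() turns that sum into the average difference. */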
2737 
2738  is->audio_stream = stream_index;
2739  is->audio_st = ic->streams[stream_index];
2740 
2741  if ((ret = decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread)) < 0)
2742  goto fail;
2743  if (is->ic->iformat->flags & AVFMT_NOTIMESTAMPS) {
2744  is->auddec.start_pts = is->audio_st->start_time;
2745  is->auddec.start_pts_tb = is->audio_st->time_base;
2746  }
2747  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2748  goto out;
2749  SDL_PauseAudioDevice(audio_dev, 0);
2750  break;
2751  case AVMEDIA_TYPE_VIDEO:
2752  is->video_stream = stream_index;
2753  is->video_st = ic->streams[stream_index];
2754 
2755  if ((ret = decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread)) < 0)
2756  goto fail;
2757  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2758  goto out;
2759  is->queue_attachments_req = 1;
2760  break;
2761  case AVMEDIA_TYPE_SUBTITLE:
2762  is->subtitle_stream = stream_index;
2763  is->subtitle_st = ic->streams[stream_index];
2764 
2765  if ((ret = decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread)) < 0)
2766  goto fail;
2767  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2768  goto out;
2769  break;
2770  default:
2771  break;
2772  }
2773  goto out;
2774 
2775 fail:
2776  avcodec_free_context(&avctx);
2777 out:
2778  av_channel_layout_uninit(&ch_layout);
2779  av_dict_free(&opts);
2780 
2781  return ret;
2782 }
2783 
2784 static int decode_interrupt_cb(void *ctx)
2785 {
2786  VideoState *is = ctx;
2787  return is->abort_request;
2788 }
2789 
2790 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2791  return stream_id < 0 ||
2792  queue->abort_request ||
2793  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2794  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2795 }
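/* A stream counts as sufficiently buffered once it holds more than MIN_FRAMES packets
 * covering (when packet durations are known) more than one second of stream time;
 * attached pictures and aborted queues are always treated as "enough" so they never
 * stall the read loop. */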
2796 
2797 static int is_realtime(AVFormatContext *s)
2798 {
2799  if( !strcmp(s->iformat->name, "rtp")
2800  || !strcmp(s->iformat->name, "rtsp")
2801  || !strcmp(s->iformat->name, "sdp")
2802  )
2803  return 1;
2804 
2805  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2806  || !strncmp(s->url, "udp:", 4)
2807  )
2808  )
2809  return 1;
2810  return 0;
2811 }
2812 
2813 /* this thread gets the stream from the disk or the network */
2814 static int read_thread(void *arg)
2815 {
2816  VideoState *is = arg;
2817  AVFormatContext *ic = NULL;
2818  int err, i, ret;
2819  int st_index[AVMEDIA_TYPE_NB];
2820  AVPacket *pkt = NULL;
2821  int64_t stream_start_time;
2822  int pkt_in_play_range = 0;
2823  const AVDictionaryEntry *t;
2824  SDL_mutex *wait_mutex = SDL_CreateMutex();
2825  int scan_all_pmts_set = 0;
2826  int64_t pkt_ts;
2827 
2828  if (!wait_mutex) {
2829  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2830  ret = AVERROR(ENOMEM);
2831  goto fail;
2832  }
2833 
2834  memset(st_index, -1, sizeof(st_index));
2835  is->eof = 0;
2836 
2837  pkt = av_packet_alloc();
2838  if (!pkt) {
2839  av_log(NULL, AV_LOG_FATAL, "Could not allocate packet.\n");
2840  ret = AVERROR(ENOMEM);
2841  goto fail;
2842  }
2843  ic = avformat_alloc_context();
2844  if (!ic) {
2845  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2846  ret = AVERROR(ENOMEM);
2847  goto fail;
2848  }
2849  ic->interrupt_callback.callback = decode_interrupt_cb;
2850  ic->interrupt_callback.opaque = is;
2851  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2852  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2853  scan_all_pmts_set = 1;
2854  }
2855  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2856  if (err < 0) {
2857  print_error(is->filename, err);
2858  ret = -1;
2859  goto fail;
2860  }
2861  if (scan_all_pmts_set)
2862  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2863 
2864  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2865  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2866  ret = AVERROR_OPTION_NOT_FOUND;
2867  goto fail;
2868  }
2869  is->ic = ic;
2870 
2871  if (genpts)
2872  ic->flags |= AVFMT_FLAG_GENPTS;
2873 
2874  if (find_stream_info) {
2875  AVDictionary **opts;
2876  int orig_nb_streams = ic->nb_streams;
2877 
2878  err = setup_find_stream_info_opts(ic, codec_opts, &opts);
2879  if (err < 0) {
2880  av_log(NULL, AV_LOG_ERROR,
2881  "Error setting up avformat_find_stream_info() options\n");
2882  ret = err;
2883  goto fail;
2884  }
2885 
2886  err = avformat_find_stream_info(ic, opts);
2887 
2888  for (i = 0; i < orig_nb_streams; i++)
2889  av_dict_free(&opts[i]);
2890  av_freep(&opts);
2891 
2892  if (err < 0) {
2893  av_log(NULL, AV_LOG_WARNING,
2894  "%s: could not find codec parameters\n", is->filename);
2895  ret = -1;
2896  goto fail;
2897  }
2898  }
2899 
2900  if (ic->pb)
2901  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2902 
2903  if (seek_by_bytes < 0)
2904  seek_by_bytes = !(ic->iformat->flags & AVFMT_NO_BYTE_SEEK) &&
2905  !!(ic->iformat->flags & AVFMT_TS_DISCONT) &&
2906  strcmp("ogg", ic->iformat->name);
2907 
2908  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2909 
2910  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2911  window_title = av_asprintf("%s - %s", t->value, input_filename);
2912 
2913  /* if seeking requested, we execute it */
2914  if (start_time != AV_NOPTS_VALUE) {
2915  int64_t timestamp;
2916 
2917  timestamp = start_time;
2918  /* add the stream start time */
2919  if (ic->start_time != AV_NOPTS_VALUE)
2920  timestamp += ic->start_time;
2921  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2922  if (ret < 0) {
2923  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2924  is->filename, (double)timestamp / AV_TIME_BASE);
2925  }
2926  }
2927 
2928  is->realtime = is_realtime(ic);
2929 
2930  if (show_status)
2931  av_dump_format(ic, 0, is->filename, 0);
2932 
2933  for (i = 0; i < ic->nb_streams; i++) {
2934  AVStream *st = ic->streams[i];
2935  enum AVMediaType type = st->codecpar->codec_type;
2936  st->discard = AVDISCARD_ALL;
2937  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2938  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2939  st_index[type] = i;
2940  }
2941  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2942  if (wanted_stream_spec[i] && st_index[i] == -1) {
2943  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2944  st_index[i] = INT_MAX;
2945  }
2946  }
2947 
2948  if (!video_disable)
2949  st_index[AVMEDIA_TYPE_VIDEO] =
2950  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2951  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2952  if (!audio_disable)
2953  st_index[AVMEDIA_TYPE_AUDIO] =
2954  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2955  st_index[AVMEDIA_TYPE_AUDIO],
2956  st_index[AVMEDIA_TYPE_VIDEO],
2957  NULL, 0);
2958  if (!video_disable && !subtitle_disable)
2959  st_index[AVMEDIA_TYPE_SUBTITLE] =
2960  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2961  st_index[AVMEDIA_TYPE_SUBTITLE],
2962  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2963  st_index[AVMEDIA_TYPE_AUDIO] :
2964  st_index[AVMEDIA_TYPE_VIDEO]),
2965  NULL, 0);
2966 
2967  is->show_mode = show_mode;
2968  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2969  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2970  AVCodecParameters *codecpar = st->codecpar;
2971  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2972  if (codecpar->width)
2973  set_default_window_size(codecpar->width, codecpar->height, sar);
2974  }
2975 
2976  /* open the streams */
2977  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2978  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2979  }
2980 
2981  ret = -1;
2982  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2983  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2984  }
2985  if (is->show_mode == SHOW_MODE_NONE)
2986  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2987 
2988  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2989  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2990  }
2991 
2992  if (is->video_stream < 0 && is->audio_stream < 0) {
2993  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2994  is->filename);
2995  ret = -1;
2996  goto fail;
2997  }
2998 
2999  if (infinite_buffer < 0 && is->realtime)
3000  infinite_buffer = 1;
3001 
3002  for (;;) {
3003  if (is->abort_request)
3004  break;
3005  if (is->paused != is->last_paused) {
3006  is->last_paused = is->paused;
3007  if (is->paused)
3008  is->read_pause_return = av_read_pause(ic);
3009  else
3010  av_read_play(ic);
3011  }
3012 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3013  if (is->paused &&
3014  (!strcmp(ic->iformat->name, "rtsp") ||
3015  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3016  /* wait 10 ms to avoid trying to get another packet */
3017  /* XXX: horrible */
3018  SDL_Delay(10);
3019  continue;
3020  }
3021 #endif
3022  if (is->seek_req) {
3023  int64_t seek_target = is->seek_pos;
3024  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3025  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3026 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3027 // of the seek_pos/seek_rel variables
3028 
3029  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3030  if (ret < 0) {
3031  av_log(NULL, AV_LOG_ERROR,
3032  "%s: error while seeking\n", is->ic->url);
3033  } else {
3034  if (is->audio_stream >= 0)
3035  packet_queue_flush(&is->audioq);
3036  if (is->subtitle_stream >= 0)
3037  packet_queue_flush(&is->subtitleq);
3038  if (is->video_stream >= 0)
3039  packet_queue_flush(&is->videoq);
3040  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3041  set_clock(&is->extclk, NAN, 0);
3042  } else {
3043  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3044  }
3045  }
3046  is->seek_req = 0;
3047  is->queue_attachments_req = 1;
3048  is->eof = 0;
3049  if (is->paused)
3050  step_to_next_frame(is);
3051  }
3052  if (is->queue_attachments_req) {
3053  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3054  if ((ret = av_packet_ref(pkt, &is->video_st->attached_pic)) < 0)
3055  goto fail;
3056  packet_queue_put(&is->videoq, pkt);
3057  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3058  }
3059  is->queue_attachments_req = 0;
3060  }
3061 
3062  /* if the queues are full, no need to read more */
3063  if (infinite_buffer<1 &&
3064  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3065  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3066  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3067  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3068  /* wait 10 ms */
3069  SDL_LockMutex(wait_mutex);
3070  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3071  SDL_UnlockMutex(wait_mutex);
3072  continue;
3073  }
3074  if (!is->paused &&
3075  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3076  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3077  if (loop != 1 && (!loop || --loop)) {
3078  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3079  } else if (autoexit) {
3080  ret = AVERROR_EOF;
3081  goto fail;
3082  }
3083  }
3084  ret = av_read_frame(ic, pkt);
3085  if (ret < 0) {
3086  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3087  if (is->video_stream >= 0)
3088  packet_queue_put_nullpacket(&is->videoq, pkt, is->video_stream);
3089  if (is->audio_stream >= 0)
3090  packet_queue_put_nullpacket(&is->audioq, pkt, is->audio_stream);
3091  if (is->subtitle_stream >= 0)
3092  packet_queue_put_nullpacket(&is->subtitleq, pkt, is->subtitle_stream);
3093  is->eof = 1;
3094  }
3095  if (ic->pb && ic->pb->error) {
3096  if (autoexit)
3097  goto fail;
3098  else
3099  break;
3100  }
3101  SDL_LockMutex(wait_mutex);
3102  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3103  SDL_UnlockMutex(wait_mutex);
3104  continue;
3105  } else {
3106  is->eof = 0;
3107  }
3108  /* check if packet is in play range specified by user, then queue, otherwise discard */
3109  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3110  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3111  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3112  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3113  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3114  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3115  <= ((double)duration / 1000000);
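 /* In other words: a packet is inside the user-specified play range when its timestamp,
  * taken relative to the stream start and converted to seconds, minus the requested -ss
  * start offset, does not exceed the -t duration (or always, when no duration was given). */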
3116  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3117  packet_queue_put(&is->audioq, pkt);
3118  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3119  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3120  packet_queue_put(&is->videoq, pkt);
3121  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3122  packet_queue_put(&is->subtitleq, pkt);
3123  } else {
3124  av_packet_unref(pkt);
3125  }
3126  }
3127 
3128  ret = 0;
3129  fail:
3130  if (ic && !is->ic)
3131  avformat_close_input(&ic);
3132 
3133  av_packet_free(&pkt);
3134  if (ret != 0) {
3135  SDL_Event event;
3136 
3137  event.type = FF_QUIT_EVENT;
3138  event.user.data1 = is;
3139  SDL_PushEvent(&event);
3140  }
3141  SDL_DestroyMutex(wait_mutex);
3142  return 0;
3143 }
3144 
3145 static VideoState *stream_open(const char *filename,
3146  const AVInputFormat *iformat)
3147 {
3148  VideoState *is;
3149 
3150  is = av_mallocz(sizeof(VideoState));
3151  if (!is)
3152  return NULL;
3153  is->last_video_stream = is->video_stream = -1;
3154  is->last_audio_stream = is->audio_stream = -1;
3155  is->last_subtitle_stream = is->subtitle_stream = -1;
3156  is->filename = av_strdup(filename);
3157  if (!is->filename)
3158  goto fail;
3159  is->iformat = iformat;
3160  is->ytop = 0;
3161  is->xleft = 0;
3162 
3163  /* start video display */
3164  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3165  goto fail;
3166  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3167  goto fail;
3168  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3169  goto fail;
3170 
3171  if (packet_queue_init(&is->videoq) < 0 ||
3172  packet_queue_init(&is->audioq) < 0 ||
3173  packet_queue_init(&is->subtitleq) < 0)
3174  goto fail;
3175 
3176  if (!(is->continue_read_thread = SDL_CreateCond())) {
3177  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3178  goto fail;
3179  }
3180 
3181  init_clock(&is->vidclk, &is->videoq.serial);
3182  init_clock(&is->audclk, &is->audioq.serial);
3183  init_clock(&is->extclk, &is->extclk.serial);
3184  is->audio_clock_serial = -1;
3185  if (startup_volume < 0)
3186  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3187  if (startup_volume > 100)
3188  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3189  startup_volume = av_clip(startup_volume, 0, 100);
3190  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3191  is->audio_volume = startup_volume;
3192  is->muted = 0;
3193  is->av_sync_type = av_sync_type;
3194  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3195  if (!is->read_tid) {
3196  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3197 fail:
3198  stream_close(is);
3199  return NULL;
3200  }
3201  return is;
3202 }
3203 
3204 static void stream_cycle_channel(VideoState *is, int codec_type)
3205 {
3206  AVFormatContext *ic = is->ic;
3207  int start_index, stream_index;
3208  int old_index;
3209  AVStream *st;
3210  AVProgram *p = NULL;
3211  int nb_streams = is->ic->nb_streams;
3212 
3213  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3214  start_index = is->last_video_stream;
3215  old_index = is->video_stream;
3216  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3217  start_index = is->last_audio_stream;
3218  old_index = is->audio_stream;
3219  } else {
3220  start_index = is->last_subtitle_stream;
3221  old_index = is->subtitle_stream;
3222  }
3223  stream_index = start_index;
3224 
3225  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3226  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3227  if (p) {
3228  nb_streams = p->nb_stream_indexes;
3229  for (start_index = 0; start_index < nb_streams; start_index++)
3230  if (p->stream_index[start_index] == stream_index)
3231  break;
3232  if (start_index == nb_streams)
3233  start_index = -1;
3234  stream_index = start_index;
3235  }
3236  }
3237 
3238  for (;;) {
3239  if (++stream_index >= nb_streams)
3240  {
3241  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3242  {
3243  stream_index = -1;
3244  is->last_subtitle_stream = -1;
3245  goto the_end;
3246  }
3247  if (start_index == -1)
3248  return;
3249  stream_index = 0;
3250  }
3251  if (stream_index == start_index)
3252  return;
3253  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3254  if (st->codecpar->codec_type == codec_type) {
3255  /* check that parameters are OK */
3256  switch (codec_type) {
3257  case AVMEDIA_TYPE_AUDIO:
3258  if (st->codecpar->sample_rate != 0 &&
3259  st->codecpar->ch_layout.nb_channels != 0)
3260  goto the_end;
3261  break;
3262  case AVMEDIA_TYPE_VIDEO:
3263  case AVMEDIA_TYPE_SUBTITLE:
3264  goto the_end;
3265  default:
3266  break;
3267  }
3268  }
3269  }
3270  the_end:
3271  if (p && stream_index != -1)
3272  stream_index = p->stream_index[stream_index];
3273  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3274  av_get_media_type_string(codec_type),
3275  old_index,
3276  stream_index);
3277 
3278  stream_component_close(is, old_index);
3279  stream_component_open(is, stream_index);
3280 }
3281 
3282 
3283 static void toggle_full_screen(VideoState *is)
3284 {
3285  is_full_screen = !is_full_screen;
3286  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3287 }
3288 
3289 static void toggle_audio_display(VideoState *is)
3290 {
3291  int next = is->show_mode;
3292  do {
3293  next = (next + 1) % SHOW_MODE_NB;
3294  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3295  if (is->show_mode != next) {
3296  is->force_refresh = 1;
3297  is->show_mode = next;
3298  }
3299 }
3300 
3301 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3302  double remaining_time = 0.0;
3303  SDL_PumpEvents();
3304  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3305  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3306  SDL_ShowCursor(0);
3307  cursor_hidden = 1;
3308  }
3309  if (remaining_time > 0.0)
3310  av_usleep((int64_t)(remaining_time * 1000000.0));
3311  remaining_time = REFRESH_RATE;
3312  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3313  video_refresh(is, &remaining_time);
3314  SDL_PumpEvents();
3315  }
3316 }
3317 
3318 static void seek_chapter(VideoState *is, int incr)
3319 {
3320  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3321  int i;
3322 
3323  if (!is->ic->nb_chapters)
3324  return;
3325 
3326  /* find the current chapter */
3327  for (i = 0; i < is->ic->nb_chapters; i++) {
3328  AVChapter *ch = is->ic->chapters[i];
3329  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3330  i--;
3331  break;
3332  }
3333  }
3334 
3335  i += incr;
3336  i = FFMAX(i, 0);
3337  if (i >= is->ic->nb_chapters)
3338  return;
3339 
3340  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3341  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3342  AV_TIME_BASE_Q), 0, 0);
3343 }
3344 
3345 /* handle an event sent by the GUI */
3346 static void event_loop(VideoState *cur_stream)
3347 {
3348  SDL_Event event;
3349  double incr, pos, frac;
3350 
3351  for (;;) {
3352  double x;
3353  refresh_loop_wait_event(cur_stream, &event);
3354  switch (event.type) {
3355  case SDL_KEYDOWN:
3356  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3357  do_exit(cur_stream);
3358  break;
3359  }
3360  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3361  if (!cur_stream->width)
3362  continue;
3363  switch (event.key.keysym.sym) {
3364  case SDLK_f:
3365  toggle_full_screen(cur_stream);
3366  cur_stream->force_refresh = 1;
3367  break;
3368  case SDLK_p:
3369  case SDLK_SPACE:
3370  toggle_pause(cur_stream);
3371  break;
3372  case SDLK_m:
3373  toggle_mute(cur_stream);
3374  break;
3375  case SDLK_KP_MULTIPLY:
3376  case SDLK_0:
3377  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3378  break;
3379  case SDLK_KP_DIVIDE:
3380  case SDLK_9:
3381  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3382  break;
3383  case SDLK_s: // S: Step to next frame
3384  step_to_next_frame(cur_stream);
3385  break;
3386  case SDLK_a:
3387  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3388  break;
3389  case SDLK_v:
3390  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3391  break;
3392  case SDLK_c:
3393  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3394  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3395  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3396  break;
3397  case SDLK_t:
3398  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3399  break;
3400  case SDLK_w:
3401  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3402  if (++cur_stream->vfilter_idx >= nb_vfilters)
3403  cur_stream->vfilter_idx = 0;
3404  } else {
3405  cur_stream->vfilter_idx = 0;
3406  toggle_audio_display(cur_stream);
3407  }
3408  break;
3409  case SDLK_PAGEUP:
3410  if (cur_stream->ic->nb_chapters <= 1) {
3411  incr = 600.0;
3412  goto do_seek;
3413  }
3414  seek_chapter(cur_stream, 1);
3415  break;
3416  case SDLK_PAGEDOWN:
3417  if (cur_stream->ic->nb_chapters <= 1) {
3418  incr = -600.0;
3419  goto do_seek;
3420  }
3421  seek_chapter(cur_stream, -1);
3422  break;
3423  case SDLK_LEFT:
3424  incr = seek_interval ? -seek_interval : -10.0;
3425  goto do_seek;
3426  case SDLK_RIGHT:
3427  incr = seek_interval ? seek_interval : 10.0;
3428  goto do_seek;
3429  case SDLK_UP:
3430  incr = 60.0;
3431  goto do_seek;
3432  case SDLK_DOWN:
3433  incr = -60.0;
3434  do_seek:
3435  if (seek_by_bytes) {
3436  pos = -1;
3437  if (pos < 0 && cur_stream->video_stream >= 0)
3438  pos = frame_queue_last_pos(&cur_stream->pictq);
3439  if (pos < 0 && cur_stream->audio_stream >= 0)
3440  pos = frame_queue_last_pos(&cur_stream->sampq);
3441  if (pos < 0)
3442  pos = avio_tell(cur_stream->ic->pb);
3443  if (cur_stream->ic->bit_rate)
3444  incr *= cur_stream->ic->bit_rate / 8.0;
3445  else
3446  incr *= 180000.0;
3447  pos += incr;
3448  stream_seek(cur_stream, pos, incr, 1);
3449  } else {
3450  pos = get_master_clock(cur_stream);
3451  if (isnan(pos))
3452  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3453  pos += incr;
3454  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3455  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3456  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3457  }
3458  break;
3459  default:
3460  break;
3461  }
3462  break;
3463  case SDL_MOUSEBUTTONDOWN:
3464  if (exit_on_mousedown) {
3465  do_exit(cur_stream);
3466  break;
3467  }
3468  if (event.button.button == SDL_BUTTON_LEFT) {
3469  static int64_t last_mouse_left_click = 0;
3470  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3471  toggle_full_screen(cur_stream);
3472  cur_stream->force_refresh = 1;
3473  last_mouse_left_click = 0;
3474  } else {
3475  last_mouse_left_click = av_gettime_relative();
3476  }
3477  }
3478  case SDL_MOUSEMOTION:
3479  if (cursor_hidden) {
3480  SDL_ShowCursor(1);
3481  cursor_hidden = 0;
3482  }
3483  cursor_last_shown = av_gettime_relative();
3484  if (event.type == SDL_MOUSEBUTTONDOWN) {
3485  if (event.button.button != SDL_BUTTON_RIGHT)
3486  break;
3487  x = event.button.x;
3488  } else {
3489  if (!(event.motion.state & SDL_BUTTON_RMASK))
3490  break;
3491  x = event.motion.x;
3492  }
3493  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3494  uint64_t size = avio_size(cur_stream->ic->pb);
3495  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3496  } else {
3497  int64_t ts;
3498  int ns, hh, mm, ss;
3499  int tns, thh, tmm, tss;
3500  tns = cur_stream->ic->duration / 1000000LL;
3501  thh = tns / 3600;
3502  tmm = (tns % 3600) / 60;
3503  tss = (tns % 60);
3504  frac = x / cur_stream->width;
3505  ns = frac * tns;
3506  hh = ns / 3600;
3507  mm = (ns % 3600) / 60;
3508  ss = (ns % 60);
3509  av_log(NULL, AV_LOG_INFO,
3510  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3511  hh, mm, ss, thh, tmm, tss);
3512  ts = frac * cur_stream->ic->duration;
3513  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3514  ts += cur_stream->ic->start_time;
3515  stream_seek(cur_stream, ts, 0, 0);
3516  }
3517  break;
3518  case SDL_WINDOWEVENT:
3519  switch (event.window.event) {
3520  case SDL_WINDOWEVENT_SIZE_CHANGED:
3521  screen_width = cur_stream->width = event.window.data1;
3522  screen_height = cur_stream->height = event.window.data2;
3523  if (cur_stream->vis_texture) {
3524  SDL_DestroyTexture(cur_stream->vis_texture);
3525  cur_stream->vis_texture = NULL;
3526  }
3527  if (vk_renderer)
3528  vk_renderer_resize(vk_renderer, cur_stream->width, cur_stream->height);
3529  case SDL_WINDOWEVENT_EXPOSED:
3530  cur_stream->force_refresh = 1;
3531  }
3532  break;
3533  case SDL_QUIT:
3534  case FF_QUIT_EVENT:
3535  do_exit(cur_stream);
3536  break;
3537  default:
3538  break;
3539  }
3540  }
3541 }
3542 
3543 static int opt_width(void *optctx, const char *opt, const char *arg)
3544 {
3545  double num;
3546  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3547  if (ret < 0)
3548  return ret;
3549 
3550  screen_width = num;
3551  return 0;
3552 }
3553 
3554 static int opt_height(void *optctx, const char *opt, const char *arg)
3555 {
3556  double num;
3557  int ret = parse_number(opt, arg, OPT_TYPE_INT64, 1, INT_MAX, &num);
3558  if (ret < 0)
3559  return ret;
3560 
3561  screen_height = num;
3562  return 0;
3563 }
3564 
3565 static int opt_format(void *optctx, const char *opt, const char *arg)
3566 {
3567  file_iformat = av_find_input_format(arg);
3568  if (!file_iformat) {
3569  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3570  return AVERROR(EINVAL);
3571  }
3572  return 0;
3573 }
3574 
3575 static int opt_sync(void *optctx, const char *opt, const char *arg)
3576 {
3577  if (!strcmp(arg, "audio"))
3578  av_sync_type = AV_SYNC_AUDIO_MASTER;
3579  else if (!strcmp(arg, "video"))
3580  av_sync_type = AV_SYNC_VIDEO_MASTER;
3581  else if (!strcmp(arg, "ext"))
3582  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3583  else {
3584  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3585  exit(1);
3586  }
3587  return 0;
3588 }
3589 
3590 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3591 {
3592  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3593  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3594  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT : SHOW_MODE_NONE;
3595 
3596  if (show_mode == SHOW_MODE_NONE) {
3597  double num;
3598  int ret = parse_number(opt, arg, OPT_TYPE_INT, 0, SHOW_MODE_NB-1, &num);
3599  if (ret < 0)
3600  return ret;
3601  show_mode = num;
3602  }
3603  return 0;
3604 }
3605 
3606 static int opt_input_file(void *optctx, const char *filename)
3607 {
3608  if (input_filename) {
3609  av_log(NULL, AV_LOG_FATAL,
3610  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3611  filename, input_filename);
3612  return AVERROR(EINVAL);
3613  }
3614  if (!strcmp(filename, "-"))
3615  filename = "fd:";
3616  input_filename = av_strdup(filename);
3617  if (!input_filename)
3618  return AVERROR(ENOMEM);
3619 
3620  return 0;
3621 }
3622 
3623 static int opt_codec(void *optctx, const char *opt, const char *arg)
3624 {
3625  const char *spec = strchr(opt, ':');
3626  const char **name;
3627  if (!spec) {
3628  av_log(NULL, AV_LOG_ERROR,
3629  "No media specifier was specified in '%s' in option '%s'\n",
3630  arg, opt);
3631  return AVERROR(EINVAL);
3632  }
3633  spec++;
3634 
3635  switch (spec[0]) {
3636  case 'a' : name = &audio_codec_name; break;
3637  case 's' : name = &subtitle_codec_name; break;
3638  case 'v' : name = &video_codec_name; break;
3639  default:
3640  av_log(NULL, AV_LOG_ERROR,
3641  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3642  return AVERROR(EINVAL);
3643  }
3644 
3645  av_freep(name);
3646  *name = av_strdup(arg);
3647  return *name ? 0 : AVERROR(ENOMEM);
3648 }
3649 
3650 static int dummy;
3651 
3652 static const OptionDef options[] = {
3653  CMDUTILS_COMMON_OPTIONS
3654  { "x", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3655  { "y", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3656  { "fs", OPT_TYPE_BOOL, 0, { &is_full_screen }, "force full screen" },
3657  { "an", OPT_TYPE_BOOL, 0, { &audio_disable }, "disable audio" },
3658  { "vn", OPT_TYPE_BOOL, 0, { &video_disable }, "disable video" },
3659  { "sn", OPT_TYPE_BOOL, 0, { &subtitle_disable }, "disable subtitling" },
3660  { "ast", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3661  { "vst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3662  { "sst", OPT_TYPE_STRING, OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3663  { "ss", OPT_TYPE_TIME, 0, { &start_time }, "seek to a given position in seconds", "pos" },
3664  { "t", OPT_TYPE_TIME, 0, { &duration }, "play \"duration\" seconds of audio/video", "duration" },
3665  { "bytes", OPT_TYPE_INT, 0, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3666  { "seek_interval", OPT_TYPE_FLOAT, 0, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3667  { "nodisp", OPT_TYPE_BOOL, 0, { &display_disable }, "disable graphical display" },
3668  { "noborder", OPT_TYPE_BOOL, 0, { &borderless }, "borderless window" },
3669  { "alwaysontop", OPT_TYPE_BOOL, 0, { &alwaysontop }, "window always on top" },
3670  { "volume", OPT_TYPE_INT, 0, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3671  { "f", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3672  { "stats", OPT_TYPE_BOOL, OPT_EXPERT, { &show_status }, "show status", "" },
3673  { "fast", OPT_TYPE_BOOL, OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3674  { "genpts", OPT_TYPE_BOOL, OPT_EXPERT, { &genpts }, "generate pts", "" },
3675  { "drp", OPT_TYPE_INT, OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3676  { "lowres", OPT_TYPE_INT, OPT_EXPERT, { &lowres }, "", "" },
3677  { "sync", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3678  { "autoexit", OPT_TYPE_BOOL, OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3679  { "exitonkeydown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3680  { "exitonmousedown", OPT_TYPE_BOOL, OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3681  { "loop", OPT_TYPE_INT, OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3682  { "framedrop", OPT_TYPE_BOOL, OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3683  { "infbuf", OPT_TYPE_BOOL, OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3684  { "window_title", OPT_TYPE_STRING, 0, { &window_title }, "set window title", "window title" },
3685  { "left", OPT_TYPE_INT, OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3686  { "top", OPT_TYPE_INT, OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3687  { "vf", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3688  { "af", OPT_TYPE_STRING, 0, { &afilters }, "set audio filters", "filter_graph" },
3689  { "rdftspeed", OPT_TYPE_INT, OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3690  { "showmode", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3691  { "i", OPT_TYPE_BOOL, 0, { &dummy}, "read specified file", "input_file"},
3692  { "codec", OPT_TYPE_FUNC, OPT_FUNC_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3693  { "acodec", OPT_TYPE_STRING, OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3694  { "scodec", OPT_TYPE_STRING, OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3695  { "vcodec", OPT_TYPE_STRING, OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3696  { "autorotate", OPT_TYPE_BOOL, 0, { &autorotate }, "automatically rotate video", "" },
3697  { "find_stream_info", OPT_TYPE_BOOL, OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3698  "read and decode the streams to fill missing information with heuristics" },
3699  { "filter_threads", OPT_TYPE_INT, OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3700  { "enable_vulkan", OPT_TYPE_BOOL, 0, { &enable_vulkan }, "enable vulkan renderer" },
3701  { "vulkan_params", OPT_TYPE_STRING, OPT_EXPERT, { &vulkan_params }, "vulkan configuration using a list of key=value pairs separated by ':'" },
3702  { "hwaccel", OPT_TYPE_STRING, OPT_EXPERT, { &hwaccel }, "use HW accelerated decoding" },
3703  { NULL, },
3704 };
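/* The table above drives both command-line parsing (parse_options() in main())
 * and the help output: show_help_options() lists the entries without OPT_EXPERT
 * under "Main options:" and the OPT_EXPERT ones under "Advanced options:".
 * OPT_TYPE_FUNC entries dispatch to the opt_* callbacks given in .func_arg;
 * the remaining entries are written directly into the static variables they
 * point to. */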
3705 
3706 static void show_usage(void)
3707 {
3708  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3709  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3710  av_log(NULL, AV_LOG_INFO, "\n");
3711 }
3712 
3713 void show_help_default(const char *opt, const char *arg)
3714 {
3715  av_log_set_callback(log_callback_help);
3716  show_usage();
3717  show_help_options(options, "Main options:", 0, OPT_EXPERT);
3718  show_help_options(options, "Advanced options:", OPT_EXPERT, 0);
3719  printf("\n");
3720  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3721  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3722  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3723  printf("\nWhile playing:\n"
3724  "q, ESC quit\n"
3725  "f toggle full screen\n"
3726  "p, SPC pause\n"
3727  "m toggle mute\n"
3728  "9, 0 decrease and increase volume respectively\n"
3729  "/, * decrease and increase volume respectively\n"
3730  "a cycle audio channel in the current program\n"
3731  "v cycle video channel\n"
3732  "t cycle subtitle channel in the current program\n"
3733  "c cycle program\n"
3734  "w cycle video filters or show modes\n"
3735  "s activate frame-step mode\n"
3736  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3737  "down/up seek backward/forward 1 minute\n"
3738  "page down/page up seek backward/forward 10 minutes\n"
3739  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3740  "left double-click toggle full screen\n"
3741  );
3742 }
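/* Illustrative invocation (not part of the original source; the file name and
 * the filter are placeholders) combining options from the table above:
 *
 *     ffplay -autoexit -seek_interval 5 -volume 50 -vf "yadif" input.mkv
 *
 * -autoexit quits when playback reaches the end, -seek_interval changes the
 * left/right arrow seek step to 5 seconds, -volume sets the startup volume on
 * the 0-100 scale, and -vf inserts a video filter graph before display. */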
3743 
3744 /* Called from the main */
3745 int main(int argc, char **argv)
3746 {
3747  int flags, ret;
3748  VideoState *is;
3749 
3750  init_dynload();
3751 
3752  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3753  parse_loglevel(argc, argv, options);
3754 
3755  /* register all codecs, demux and protocols */
3756 #if CONFIG_AVDEVICE
3757  avdevice_register_all();
3758 #endif
3759  avformat_network_init();
3760 
3761  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3762  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3763 
3764  show_banner(argc, argv, options);
3765 
3766  ret = parse_options(NULL, argc, argv, options, opt_input_file);
3767  if (ret < 0)
3768  exit(ret == AVERROR_EXIT ? 0 : 1);
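/* At this point parse_options() has consumed argv using the options[] table;
 * the single non-option argument, if any, was handed to opt_input_file(),
 * which stores it in input_filename (checked below). */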
3769 
3770  if (!input_filename) {
3771  show_usage();
3772  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3773  av_log(NULL, AV_LOG_FATAL,
3774  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3775  exit(1);
3776  }
3777 
3778  if (display_disable) {
3779  video_disable = 1;
3780  }
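/* Build the SDL subsystem mask: start from video+audio+timer and strip the
 * subsystems that -an (audio_disable) or -nodisp (display_disable) turned off
 * above, so SDL_Init() only initializes what is actually needed. */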
3781  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3782  if (audio_disable)
3783  flags &= ~SDL_INIT_AUDIO;
3784  else {
3785  /* Try to work around an occasional ALSA buffer underflow issue when the
3786  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3787  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3788  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3789  }
3790  if (display_disable)
3791  flags &= ~SDL_INIT_VIDEO;
3792  if (SDL_Init (flags)) {
3793  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3794  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3795  exit(1);
3796  }
3797 
3798  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3799  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
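/* Neither raw window-manager events nor bare SDL_USEREVENTs are handled by the
 * player, so SDL is told not to queue them; ffplay's own quit notification uses
 * a separate user event type (FF_QUIT_EVENT). */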
3800 
3801  if (!display_disable) {
3802  int flags = SDL_WINDOW_HIDDEN;
3803  if (alwaysontop)
3804 #if SDL_VERSION_ATLEAST(2,0,5)
3805  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3806 #else
3807  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3808 #endif
3809  if (borderless)
3810  flags |= SDL_WINDOW_BORDERLESS;
3811  else
3812  flags |= SDL_WINDOW_RESIZABLE;
3813 
3814 #ifdef SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR
3815  SDL_SetHint(SDL_HINT_VIDEO_X11_NET_WM_BYPASS_COMPOSITOR, "0");
3816 #endif
3817  if (hwaccel && !enable_vulkan) {
3818  av_log(NULL, AV_LOG_INFO, "Enable vulkan renderer to support hwaccel %s\n", hwaccel);
3819  enable_vulkan = 1;
3820  }
3821  if (enable_vulkan) {
3822  vk_renderer = vk_get_renderer();
3823  if (vk_renderer) {
3824 #if SDL_VERSION_ATLEAST(2, 0, 6)
3825  flags |= SDL_WINDOW_VULKAN;
3826 #endif
3827  } else {
3828  av_log(NULL, AV_LOG_WARNING, "Doesn't support vulkan renderer, fallback to SDL renderer\n");
3829  enable_vulkan = 0;
3830  }
3831  }
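/* Summary of the branch above: the Vulkan renderer is used only when it was
 * requested (or implied by -hwaccel), an implementation is available from
 * vk_get_renderer(), and SDL is new enough for SDL_WINDOW_VULKAN; otherwise
 * the player falls back to the regular SDL renderer created below. */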
3832  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3833  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3834  if (!window) {
3835  av_log(NULL, AV_LOG_FATAL, "Failed to create window: %s", SDL_GetError());
3836  do_exit(NULL);
3837  }
3838 
3839  if (vk_renderer) {
3840  AVDictionary *dict = NULL;
3841 
3842  if (vulkan_params)
3843  av_dict_parse_string(&dict, vulkan_params, "=", ":", 0);
3844  ret = vk_renderer_create(vk_renderer, window, dict);
3845  av_dict_free(&dict);
3846  if (ret < 0) {
3847  av_log(NULL, AV_LOG_FATAL, "Failed to create vulkan renderer, %s\n", av_err2str(ret));
3848  do_exit(NULL);
3849  }
3850  } else {
3851  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3852  if (!renderer) {
3853  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3854  renderer = SDL_CreateRenderer(window, -1, 0);
3855  }
3856  if (renderer) {
3857  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3858  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3859  }
3860  if (!renderer || !renderer_info.num_texture_formats) {
3861  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3862  do_exit(NULL);
3863  }
3864  }
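/* Renderer selection in the non-Vulkan path: first try a hardware-accelerated,
 * vsynced renderer, then fall back to whatever SDL offers (possibly software);
 * the player only gives up if no renderer with usable texture formats exists. */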
3865  }
3866 
3867  is = stream_open(input_filename, file_iformat);
3868  if (!is) {
3869  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3870  do_exit(NULL);
3871  }
3872 
3873  event_loop(is);
3874 
3875  /* never returns */
3876 
3877  return 0;
3878 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:522
AVSubtitle
Definition: avcodec.h:2227
rect::w
int w
Definition: f_ebur128.c:77
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2508
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1303
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:212
AV_PKT_DATA_DISPLAYMATRIX
@ AV_PKT_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:109
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: packet.c:427
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:107
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:282
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
Frame::width
int width
Definition: ffplay.c:159
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:487
av_clip
#define av_clip
Definition: common.h:99
VideoState::rdft
AVTXContext * rdft
Definition: ffplay.c:263
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:132
av_sync_type
static int av_sync_type
Definition: ffplay.c:325
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:363
FrameData::pkt_pos
int64_t pkt_pos
Definition: ffplay.c:148
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1107
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:802
VideoState::agraph
AVFilterGraph * agraph
Definition: ffplay.c:297
configure_audio_filters
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:1990
opt_add_vfilter
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:394
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:786
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
Decoder::finished
int finished
Definition: ffplay.c:191
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
FrameData
Definition: ffmpeg.h:630
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1184
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:808
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, const AVCodec **decoder_ret, int flags)
Definition: avformat.c:443
out
FILE * out
Definition: movenc.c:55
VideoState::rdft_fn
av_tx_fn rdft_fn
Definition: ffplay.c:264
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1050
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2155
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:231
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:947
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1335
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:453
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:84
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1355
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:47
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:162
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:173
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:948
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:241
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:579
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:674
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:176
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:814
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: avformat.c:392
display_disable
static int display_disable
Definition: ffplay.c:320
screen_width
static int screen_width
Definition: ffplay.c:310
ffplay_renderer.h
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:56
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:904
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:102
AVTXContext
Definition: tx_priv.h:235
rect
Definition: f_ebur128.c:77
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1521
AVFMT_NOTIMESTAMPS
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:479
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
VideoState::auddec
Decoder auddec
Definition: ffplay.c:225
screen_left
static int screen_left
Definition: ffplay.c:312
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:806
AudioParams::frame_size
int frame_size
Definition: ffplay.c:133
AVSubtitleRect
Definition: avcodec.h:2200
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:196
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2144
rect::y
int y
Definition: f_ebur128.c:77
FrameQueue::size
int size
Definition: ffplay.c:171
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:197
av_unused
#define av_unused
Definition: attributes.h:131
normalize.log
log
Definition: normalize.py:21
Frame::sar
AVRational sar
Definition: ffplay.c:162
out_size
int out_size
Definition: movenc.c:56
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:160
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:270
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1753
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
AudioParams
Definition: ffplay.c:129
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:275
VideoState::audio_filter_src
struct AudioParams audio_filter_src
Definition: ffplay.c:251
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1323
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:280
AVFrame::width
int width
Definition: frame.h:446
VideoState::xleft
int xleft
Definition: ffplay.c:289
AVPacketSideData
This structure stores auxiliary information for decoding, presenting, or otherwise processing the cod...
Definition: packet.h:375
Frame::pts
double pts
Definition: ffplay.c:156
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:686
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:168
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:689
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:340
parse_number
int parse_number(const char *context, const char *numstr, enum OptionType type, double min, double max, double *dst)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:84
AV_HWDEVICE_TYPE_NONE
@ AV_HWDEVICE_TYPE_NONE
Definition: hwcontext.h:28
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:66
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:172
vk_renderer_create
int vk_renderer_create(VkRenderer *renderer, SDL_Window *window, AVDictionary *opt)
Definition: ffplay_renderer.c:812
AVChapter::start
int64_t start
Definition: avformat.h:1217
Clock
Definition: ffplay.c:137
data
const char data[16]
Definition: mxf.c:148
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:126
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:61
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:188
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:58
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2446
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:239
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:75
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:452
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:236
av_hwdevice_find_type_by_name
enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name)
Look up an AVHWDeviceType by name.
Definition: hwcontext.c:102
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:104
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:75
AVComplexFloat
Definition: tx.h:27
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:140
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:76
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:102
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
autorotate
static int autorotate
Definition: ffplay.c:348
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:542
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:370
video_disable
static int video_disable
Definition: ffplay.c:315
Frame::uploaded
int uploaded
Definition: ffplay.c:163
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:1206
AVDictionary
Definition: dict.c:34
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1536
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
Frame
Definition: ffplay.c:152
opt_input_file
static int opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3606
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:183
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1267
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: demux.c:1538
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:72
vk_renderer_destroy
void vk_renderer_destroy(VkRenderer *renderer)
Definition: ffplay_renderer.c:833
VideoState::paused
int paused
Definition: ffplay.c:206
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1417
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:323
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1450
VideoState::width
int width
Definition: ffplay.c:289
file_iformat
static const AVInputFormat * file_iformat
Definition: ffplay.c:305
sample_rate
sample_rate
Definition: ffmpeg_filter.c:424
dummy
static int dummy
Definition: ffplay.c:3650
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:359
av_packet_free
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: packet.c:74
PacketQueue
Definition: ffplay.c:113
av_tx_init
av_cold int av_tx_init(AVTXContext **ctx, av_tx_fn *tx, enum AVTXType type, int inv, int len, const void *scale, uint64_t flags)
Initialize a transform context with the given configuration (i)MDCTs with an odd length are currently...
Definition: tx.c:903
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2258
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:591
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:395
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:299
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:258
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
OptionDef
Definition: cmdutils.h:126
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2361
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:316
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:221
genpts
static int genpts
Definition: ffplay.c:329
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:253
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3575
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1528
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame)
Definition: ffplay.c:906
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:223
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:369
FrameQueue::rindex
int rindex
Definition: ffplay.c:169
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1371
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:207
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: demux.c:363
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:615
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1528
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:70
startup_volume
static int startup_volume
Definition: ffplay.c:323
window
static SDL_Window * window
Definition: ffplay.c:361
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
fifo.h
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3283
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:144
VideoState::extclk
Clock extclk
Definition: ffplay.c:219
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:210
alwaysontop
static int alwaysontop
Definition: ffplay.c:322
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:239
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:470
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:95
AVCodecContext::ch_layout
AVChannelLayout ch_layout
Audio channel layout.
Definition: avcodec.h:1065
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
FrameQueue
Definition: ffplay.c:167
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:441
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2201
VideoState::video_stream
int video_stream
Definition: ffplay.c:281
autoexit
static int autoexit
Definition: ffplay.c:332
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:494
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1214
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:963
val
static double val(void *priv, double ch)
Definition: aeval.c:78
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:775
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3590
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:193
pts
static int64_t pts
Definition: transcode_aac.c:644
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1411
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:237
OPT_TYPE_FLOAT
@ OPT_TYPE_FLOAT
Definition: cmdutils.h:86
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:740
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:235
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:299
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:202
fast
static int fast
Definition: ffplay.c:328
loop
static int loop
Definition: ffplay.c:335
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:559
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:265
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *const *out_arg, int out_count, const uint8_t *const *in_arg, int in_count)
Convert audio.
Definition: swresample.c:719
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3554
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: avcodec.c:393
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:148
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1397
is_full_screen
static int is_full_screen
Definition: ffplay.c:356
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, const AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:932
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:947
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:164
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:206
pkt
AVPacket * pkt
Definition: movenc.c:60
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1490
vk_get_renderer
VkRenderer * vk_get_renderer(void)
Definition: ffplay_renderer.c:805
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
AVInputFormat
Definition: avformat.h:548
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2066
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1405
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:761
VideoState
Definition: ffplay.c:201
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:734
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2470
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1425
av_tx_fn
void(* av_tx_fn)(AVTXContext *s, void *out, void *in, ptrdiff_t stride)
Function pointer to a function to perform the transform.
Definition: tx.h:151
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:140
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, const AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: demux.c:215
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:722
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
av_channel_layout_describe
int av_channel_layout_describe(const AVChannelLayout *channel_layout, char *buf, size_t buf_size)
Get a human-readable string describing the channel layout properties.
Definition: channel_layout.c:645
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:149
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:257
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:189
s
#define s(width, name)
Definition: cbs_vp9.c:198
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3713
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:59
default_height
static int default_height
Definition: ffplay.c:309
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1406
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:144
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:553
AVFormatContext::iformat
const struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1267
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:456
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
Definition: pixfmt.h:616
AVDictionaryEntry::key
char * key
Definition: dict.h:90
Clock::last_updated
double last_updated
Definition: ffplay.c:140
PacketQueue::duration
int64_t duration
Definition: ffplay.c:117
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2202
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:134
video_stream
static AVStream * video_stream
Definition: demux_decode.c:42
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:861
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:311
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:92
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:190
configure_video_filters
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1860
avcodec_receive_frame
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder or encoder (when the AV_CODEC_FLAG_RECON_FRAME flag is used...
Definition: avcodec.c:696
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVHWDeviceType
AVHWDeviceType
Definition: hwcontext.h:27
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
setup_find_stream_info_opts
int setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts, AVDictionary ***dst)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:1048
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: demux_utils.c:182
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3623
AVPacketSideData::data
uint8_t * data
Definition: packet.h:376
Clock::pts_drift
double pts_drift
Definition: ffplay.c:139
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:283
ctx
AVFormatContext * ctx
Definition: movenc.c:49
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: avformat.c:727
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:182
channels
channels
Definition: aptx.h:31
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:98
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:174
nb_streams
static int nb_streams
Definition: ffprobe.c:384
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
vk_renderer_get_hw_dev
int vk_renderer_get_hw_dev(VkRenderer *renderer, AVBufferRef **dev)
Definition: ffplay_renderer.c:818
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:205
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1385
vk_renderer_display
int vk_renderer_display(VkRenderer *renderer, AVFrame *frame)
Definition: ffplay_renderer.c:823
screen_top
static int screen_top
Definition: ffplay.c:313
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:238
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:90
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:73
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:362
filter_codec_opts
int filter_codec_opts(const AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, const AVCodec *codec, AVDictionary **dst)
Filter out options for given codec.
Definition: cmdutils.c:987
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1566
NAN
#define NAN
Definition: mathematics.h:115
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:454
VideoState::step
int step
Definition: ffplay.c:290
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2313
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:455
arg
const char * arg
Definition: jacosubdec.c:67
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:307
Clock::speed
double speed
Definition: ffplay.c:141
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:181
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:258
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: defs.h:219
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:442
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:631
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:75
opts
AVDictionary * opts
Definition: movenc.c:51
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const struct AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2203
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3318
VkRenderer
Definition: ffplay_renderer.c:49
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1433
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:186
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
frame_queue_destroy
static void frame_queue_destroy(FrameQueue *f)
Definition: ffplay.c:710
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1243
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:782
NULL
#define NULL
Definition: coverity.c:32
avcodec_find_decoder_by_name
const AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:1001
FrameQueue::max_size
int max_size
Definition: ffplay.c:172
AV_DICT_MULTIKEY
#define AV_DICT_MULTIKEY
Allow to store several equal keys in the dictionary.
Definition: dict.h:84
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:142
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:74
Decoder
Definition: ffmpeg.h:380
AudioParams::freq
int freq
Definition: ffplay.c:130
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:164
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AudioParams::ch_layout
AVChannelLayout ch_layout
Definition: ffplay.c:131
audio_open
static int audio_open(void *opaque, AVChannelLayout *wanted_channel_layout, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2513
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3204
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:255
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:357
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1297
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:452
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:683
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:168
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:262
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:192
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:343
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1183
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:729
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:71
Frame::duration
double duration
Definition: ffplay.c:157
lowres
static int lowres
Definition: ffplay.c:330
double
double
Definition: af_crystalizer.c:131
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)3R 3G 2B(lsb)
Definition: pixfmt.h:93
avcodec_open2
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:142
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:158
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:279
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1397
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1510
TextureFormatEntry
Definition: ffplay.c:368
AVFilterGraph
Definition: avfilter.h:813
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2621
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: packet.c:435
fp
#define fp
Definition: regdef.h:44
AVCodecParameters::ch_layout
AVChannelLayout ch_layout
Audio only.
Definition: codec_par.h:180
VideoState::rdft_data
AVComplexFloat * rdft_data
Definition: ffplay.c:267
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: packet.c:484
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:448
exp
int8_t exp
Definition: eval.c:73
VideoState::seek_req
int seek_req
Definition: ffplay.c:209
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:258
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:233
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:213
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3346
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:289
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:184
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:791
av_packet_side_data_get
const AVPacketSideData * av_packet_side_data_get(const AVPacketSideData *sd, int nb_sd, enum AVPacketSideDataType type)
Get side information from a side data array.
Definition: packet.c:654
avcodec_find_decoder
const AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:973
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:260
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1311
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:334
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
VideoState::iformat
const AVInputFormat * iformat
Definition: ffplay.c:203
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:197
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1854
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:240
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:58
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:357
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
A generic parameter which can be set by the user for filtering.
Definition: opt.h:298
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: demux.c:2516
Frame::format
int format
Definition: ffplay.c:161
INSERT_FILT
#define INSERT_FILT(name, arg)
f
f
Definition: af_crystalizer.c:121
swr_alloc_set_opts2
int swr_alloc_set_opts2(struct SwrContext **ps, const AVChannelLayout *out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, const AVChannelLayout *in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:40
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:509
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:476
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:75
OPT_TYPE_INT
@ OPT_TYPE_INT
Definition: cmdutils.h:84
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:462
AVPacket::size
int size
Definition: packet.h:525
VideoState::in_audio_filter
AVFilterContext * in_audio_filter
Definition: ffplay.c:295
AVFifo
Definition: fifo.c:35
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: avformat.c:681
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:247
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:161
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: defs.h:214
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:175
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:744
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:144
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:103
Frame::sub
AVSubtitle sub
Definition: ffplay.c:154
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:299
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:122
vfilters_list
static const char ** vfilters_list
Definition: ffplay.c:345
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
create_hwaccel
static int create_hwaccel(AVBufferRef **device_ctx)
Definition: ffplay.c:2589
decoder_init
static int decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:566
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:149
sp
#define sp
Definition: regdef.h:63
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
sdl_supported_color_spaces
static enum AVColorSpace sdl_supported_color_spaces[]
Definition: ffplay.c:940
start_time
static int64_t start_time
Definition: ffplay.c:326
audio_stream
static AVStream * audio_stream
Definition: demux_decode.c:42
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:258
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1057
Frame::serial
int serial
Definition: ffplay.c:155
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are expressed.
Definition: avcodec.h:551
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:62
size
int size
Definition: twinvq_data.h:10344
VideoState::xpos
int xpos
Definition: ffplay.c:268
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2814
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:471
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: seek.c:663
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:143
rect::h
int h
Definition: f_ebur128.c:77
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:271
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:121
AVFrameSideData::data
uint8_t * data
Definition: frame.h:252
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:272
OPT_TYPE_INT64
@ OPT_TYPE_INT64
Definition: cmdutils.h:85
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:261
fn
#define fn(a)
Definition: aap_template.c:37
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:317
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:337
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:284
avdevice.h
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: opt_common.c:237
diff
static av_always_inline int diff(const struct color_info *a, const struct color_info *b, const int trans_thresh)
Definition: vf_paletteuse.c:165
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:504
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:523
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:254
update_video_pts
static void update_video_pts(VideoState *is, double pts, int serial)
Definition: ffplay.c:1578
a
The reader does not expect b to be semantically negative here, and if the code is changed (by maybe adding a division or other operation) the signedness will almost certainly be mistaken. To avoid this confusion a new type was introduced: SUINT is the C unsigned type, but it holds a signed int (to use the same example, SUINT a).
Definition: undefined.txt:41
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1516
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:817
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1586
AV_CHANNEL_ORDER_NATIVE
@ AV_CHANNEL_ORDER_NATIVE
The native channel order, i.e. the channels are in the same order in which they are defined in the AVChannel enum.
Definition: channel_layout.h:118
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:608
seek_interval
static float seek_interval
Definition: ffplay.c:319
av_packet_alloc
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: packet.c:63
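A sketch of the usual allocate/reuse/free cycle, assuming a demuxer context ic (placeholder name) and a surrounding function that returns an int error code:
AVPacket *pkt = av_packet_alloc();
if (!pkt)
    return AVERROR(ENOMEM);
while (av_read_frame(ic, pkt) >= 0) {
    /* ... hand pkt to a decoder or a PacketQueue ... */
    av_packet_unref(pkt);      /* drop the payload, keep the AVPacket for reuse */
}
av_packet_free(&pkt);           /* frees the struct and any remaining payload */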
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
rect::x
int x
Definition: f_ebur128.c:77
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:211
OPT_TYPE_FUNC
@ OPT_TYPE_FUNC
Definition: cmdutils.h:81
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:776
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:364
OPT_TYPE_BOOL
@ OPT_TYPE_BOOL
Definition: cmdutils.h:82
av_tx_uninit
av_cold void av_tx_uninit(AVTXContext **ctx)
Frees a context and sets *ctx to NULL, does nothing when *ctx == NULL.
Definition: tx.c:295
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1330
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e. the same channels are present at the same positions in both.
Definition: channel_layout.c:801
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:424
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:512
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:341
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:201
iformat
static const AVInputFormat * iformat
Definition: ffprobe.c:360
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:490
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:831
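A small fragment showing the typical default/copy/compare/uninit sequence for AVChannelLayout; note that a return value of 0 from av_channel_layout_compare() means the layouts match:
AVChannelLayout src = { 0 }, dst = { 0 };
av_channel_layout_default(&src, 2);              /* stereo */
if (av_channel_layout_copy(&dst, &src) < 0)
    return AVERROR(ENOMEM);
if (!av_channel_layout_compare(&src, &dst))
    av_log(NULL, AV_LOG_INFO, "layouts match\n");
av_channel_layout_uninit(&src);
av_channel_layout_uninit(&dst);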
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:532
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:40
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:64
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:406
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:65
nb_vfilters
static int nb_vfilters
Definition: ffplay.c:346
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:208
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
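The standard send/receive decoding loop this function belongs to, sketched under the assumption that avctx, pkt and frame already exist:
int ret = avcodec_send_packet(avctx, pkt);
if (ret < 0)
    return ret;
for (;;) {
    ret = avcodec_receive_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                  /* need more input, or decoder fully drained */
    if (ret < 0)
        return ret;             /* a real decoding error */
    /* ... consume frame ... */
    av_frame_unref(frame);
}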
FrameQueue::windex
int windex
Definition: ffplay.c:170
VideoState::filename
char * filename
Definition: ffplay.c:288
VideoState::muted
int muted
Definition: ffplay.c:249
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:194
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:454
bprint.h
Clock::pts
double pts
Definition: ffplay.c:138
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:116
av_hwdevice_ctx_create_derived
int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ref_ptr, enum AVHWDeviceType type, AVBufferRef *src_ref, int flags)
Create a new device of the specified type from an existing device.
Definition: hwcontext.c:703
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:124
PacketQueue::serial
int serial
Definition: ffplay.c:119
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:517
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:250
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:62
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:243
OPT_TYPE_TIME
@ OPT_TYPE_TIME
Definition: cmdutils.h:88
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:919
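A hedged sketch of the inputs/outputs wiring that usually accompanies this call, the same general pattern as ffplay's configure_filtergraph(); graph, source_ctx and sink_ctx are assumed to exist already, and "yadif" is only an example filter description:
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs  = avfilter_inout_alloc();
int ret;
if (!outputs || !inputs) { ret = AVERROR(ENOMEM); goto fail; }
outputs->name       = av_strdup("in");    /* label of the buffersrc output pad */
outputs->filter_ctx = source_ctx;
outputs->pad_idx    = 0;
outputs->next       = NULL;
inputs->name        = av_strdup("out");   /* label of the buffersink input pad */
inputs->filter_ctx  = sink_ctx;
inputs->pad_idx     = 0;
inputs->next        = NULL;
ret = avfilter_graph_parse_ptr(graph, "yadif", &inputs, &outputs, NULL);
if (ret >= 0)
    ret = avfilter_graph_config(graph, NULL);
fail:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);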
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:57
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1053
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:195
AVCodecParameters::height
int height
Definition: codec_par.h:135
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:435
Decoder::pkt
AVPacket * pkt
Definition: ffplay.c:187
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:31
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1347
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:888
show_status
static int show_status
Definition: ffplay.c:324
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3565
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
parse_options
int parse_options(void *optctx, int argc, char **argv, const OptionDef *options, int(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:405
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:466
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
vk_renderer_resize
int vk_renderer_resize(VkRenderer *renderer, int width, int height)
Definition: ffplay_renderer.c:828
borderless
static int borderless
Definition: ffplay.c:321
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
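A fragment illustrating how av_frame_move_ref() and av_frame_unref() cooperate; error handling is kept minimal:
AVFrame *src = av_frame_alloc();
AVFrame *dst = av_frame_alloc();
if (!src || !dst)
    return AVERROR(ENOMEM);
/* ... decode into src ... */
av_frame_move_ref(dst, src);    /* dst now owns the buffers, src is reset */
/* ... use dst ... */
av_frame_unref(dst);            /* release the buffer references, dst can be reused */
av_frame_free(&src);
av_frame_free(&dst);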
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2293
MyAVPacketList
Definition: ffplay.c:108
OPT_FUNC_ARG
#define OPT_FUNC_ARG
Definition: cmdutils.h:136
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:58
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if available on the CPU) and zero all the bytes of the block.
Definition: mem.c:256
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1179
AVCodecContext::hw_device_ctx
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/decoder.
Definition: avcodec.h:1497
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g. an RTSP stream); use av_read_play() to resume it.
Definition: demux_utils.c:191
len
int len
Definition: vorbis_enc_data.h:426
Frame::frame
AVFrame * frame
Definition: ffplay.c:153
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:470
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:121
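A worked example of the buffer-size computation for interleaved stereo S16 audio:
int linesize;
int size = av_samples_get_buffer_size(&linesize, 2, 1024, AV_SAMPLE_FMT_S16, 1);
if (size < 0)
    return size;
/* size == 1024 samples * 2 channels * 2 bytes == 4096;
 * linesize equals size for interleaved (packed) formats */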
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:39
vk_renderer
static VkRenderer * vk_renderer
Definition: ffplay.c:366
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:115
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
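av_rescale(a, b, c) evaluates a * b / c with 64-bit intermediates and rounding to nearest; a small illustrative conversion:
int64_t pts_90k = 2700000;                            /* 30 seconds at 90 kHz */
int64_t ms      = av_rescale(pts_90k, 1000, 90000);   /* -> 30000 milliseconds */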
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:127
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:760
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:311
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:61
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:465
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:540
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
Clock::serial
int serial
Definition: ffplay.c:142
VideoState::height
int height
Definition: ffplay.c:289
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:84
AVStream::disposition
int disposition
Stream disposition - a combination of AV_DISPOSITION_* flags.
Definition: avformat.h:812
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1407
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:222
av_buffer_allocz
AVBufferRef * av_buffer_allocz(size_t size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:93
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:318
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:67
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: avformat.c:750
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2204
stream_open
static VideoState * stream_open(const char *filename, const AVInputFormat *iformat)
Definition: ffplay.c:3145
FFSWAP
#define FFSWAP(type, a, b)
Definition: macros.h:52
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: avcodec.c:364
frame
These buffered frames must be flushed immediately if a new input produces new ones; the filter must not call request_frame to get more. It must just process the frame or queue it. The task of requesting more frames is left to the filter's request_frame method or the application. If a filter has several inputs, the filter must be ready for frames arriving randomly on any input; any filter with several inputs will most likely require some kind of queuing mechanism. It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced. request_frame: for filters that do not use the activate callback, this method is called when a frame is wanted on an output. For a source it should directly call filter_frame on the corresponding output; for a filter with queued frames already prepared, one of these frames should be pushed. Otherwise the filter should request a frame on one of its inputs, repeatedly until at least one frame has been pushed. Return, or at least make progress towards producing, a frame.
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:455
VideoState::vfilter_idx
int vfilter_idx
Definition: ffplay.c:292
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:350
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:70
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:344
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:258
av_hwdevice_ctx_create
int av_hwdevice_ctx_create(AVBufferRef **pdevice_ref, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags)
Open a device of the specified type and create an AVHWDeviceContext for it.
Definition: hwcontext.c:600
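A minimal sketch of attaching a hardware device to a decoder context; AV_HWDEVICE_TYPE_VAAPI is only an example type and avctx is assumed to exist:
AVBufferRef *hw_dev = NULL;
int ret = av_hwdevice_ctx_create(&hw_dev, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
if (ret < 0)
    return ret;
avctx->hw_device_ctx = av_buffer_ref(hw_dev);   /* decoder keeps its own reference */
av_buffer_unref(&hw_dev);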
find_stream_info
static int find_stream_info
Definition: ffplay.c:349
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:418
hwaccel
static const char * hwaccel
Definition: ffplay.c:353
pos
unsigned int pos
Definition: spdifenc.c:414
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:246
avformat.h
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
VideoState::out_video_filter
AVFilterContext * out_video_filter
Definition: ffplay.c:294
dict.h
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
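A sketch of the auto-growing FIFO pattern that ffplay's PacketQueue is built on; the Item struct here is only a stand-in for MyAVPacketList:
typedef struct Item { AVPacket *pkt; int serial; } Item;
AVFifo *fifo = av_fifo_alloc2(1, sizeof(Item), AV_FIFO_FLAG_AUTO_GROW);
if (!fifo)
    return AVERROR(ENOMEM);
Item in = { .pkt = NULL, .serial = 0 }, out;
av_fifo_write(fifo, &in, 1);            /* grows automatically thanks to the flag */
if (av_fifo_read(fifo, &out, 1) >= 0) {
    /* ... use out ... */
}
av_fifo_freep2(&fifo);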
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:481
VideoState::last_paused
int last_paused
Definition: ffplay.c:207
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages; this requires the user app to use av_log() instead of (f)printf, as the two would otherwise interfere and lead to "Last message repeated x times" messages.
Definition: log.h:370
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:88
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: opt_common.h:199
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:74
rdftspeed
double rdftspeed
Definition: ffplay.c:342
AV_TX_FLOAT_RDFT
@ AV_TX_FLOAT_RDFT
Real to complex and complex to real DFTs.
Definition: tx.h:90
MyAVPacketList::serial
int serial
Definition: ffplay.c:110
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3543
enable_vulkan
static int enable_vulkan
Definition: ffplay.c:351
main
int main(int argc, char **argv)
Definition: ffplay.c:3745
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:571
show_usage
static void show_usage(void)
Definition: ffplay.c:3706
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVCodecContext
main external API structure.
Definition: avcodec.h:445
AVFrame::height
int height
Definition: frame.h:446
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:120
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:523
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:218
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:339
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:82
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:338
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:121
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2433
PacketQueue::size
int size
Definition: ffplay.c:116
options
static const OptionDef options[]
Definition: ffplay.c:3652
opt_common.h
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:567
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
VideoState::in_video_filter
AVFilterContext * in_video_filter
Definition: ffplay.c:293
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:274
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:204
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:826
av_bprint_clear
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:232
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:433
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:245
VideoState::eof
int eof
Definition: ffplay.c:286
av_dict_parse_string
int av_dict_parse_string(AVDictionary **pm, const char *str, const char *key_val_sep, const char *pairs_sep, int flags)
Parse the key/value pairs list and add the parsed entries to a dictionary.
Definition: dict.c:200
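A fragment combining the dictionary helpers listed in this index (set, parse, iterate, free); the option keys and values are illustrative only:
AVDictionary *opts = NULL;
av_dict_set(&opts, "threads", "auto", 0);
av_dict_parse_string(&opts, "analyzeduration=100M:probesize=50M", "=", ":", 0);
const AVDictionaryEntry *e = NULL;
while ((e = av_dict_iterate(opts, e)))
    av_log(NULL, AV_LOG_INFO, "%s=%s\n", e->key, e->value);
av_dict_free(&opts);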
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:80
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:678
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:77
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:202
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:248
VideoState::subdec
Decoder subdec
Definition: ffplay.c:227
AVIOContext::eof_reached
int eof_reached
true if it was unable to read due to error or eof
Definition: avio.h:238
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2790
samples
In filter documentation, the word “frame” indicates either a video frame or a group of audio samples.
Definition: filter_design.txt:8
VideoState::out_audio_filter
AVFilterContext * out_audio_filter
Definition: ffplay.c:296
av_find_input_format
const AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:144
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1390
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:526
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:465
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:125
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
input_filename
static const char * input_filename
Definition: ffplay.c:306
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:440
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1497
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:301
vulkan_params
static char * vulkan_params
Definition: ffplay.c:352
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set() that converts the value to a string and stores it.
Definition: dict.c:167
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3289
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:453
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:272
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
A generic parameter which can be set by the user for demuxing or decoding.
Definition: opt.h:273
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:481
VideoState::real_data
float * real_data
Definition: ffplay.c:266
mem.h
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1784
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:250
default_width
static int default_width
Definition: ffplay.c:308
configure_filtergraph
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1817
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:60
VideoState::realtime
int realtime
Definition: ffplay.c:215
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:285
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
OPT_TYPE_STRING
@ OPT_TYPE_STRING
Definition: cmdutils.h:83
AVPacket
This structure stores compressed data.
Definition: packet.h:501
audio_disable
static int audio_disable
Definition: ffplay.c:314
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3301
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:88
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:557
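A sketch of the grow-only scratch-buffer pattern (the same idea as audio_buf1/audio_buf1_size in ffplay); the size "needed" is a hypothetical value:
uint8_t     *buf      = NULL;
unsigned int buf_size = 0;
size_t       needed   = 4096;          /* size required this iteration */
av_fast_malloc(&buf, &buf_size, needed);   /* reallocates only when needed > buf_size */
if (!buf)
    return AVERROR(ENOMEM);
/* ... fill buf ... */
av_freep(&buf);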
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1209
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:276
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:408
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:198
d
d
Definition: ffmpeg_filter.c:424
int32_t
int32_t
Definition: audioconvert.c:56
framedrop
static int framedrop
Definition: ffplay.c:336
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:229
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:244
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:474
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each picture line.
Definition: frame.h:419
block
The exact code depends on how similar the blocks are and how related they are to the block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1380
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:118
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:214
VideoState::viddec
Decoder viddec
Definition: ffplay.c:226
h
h
Definition: vp9dsp_template.c:2038
AVERROR_EXIT
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:58
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:792
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:242
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:739
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:269
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int by_bytes)
Definition: ffplay.c:1483
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:331
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / derived in SMPTE RP 177 Annex B
Definition: pixfmt.h:611
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:252
afilters
static char * afilters
Definition: ffplay.c:347
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1216
int
int
Definition: ffmpeg_filter.c:424
SwsContext
Definition: swscale_internal.h:301
VideoState::audclk
Clock audclk
Definition: ffplay.c:217
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1608
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
print_error
static void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error code.
Definition: cmdutils.h:401
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:177
short
It's the only field you need to keep, assuming you have a context. There is some magic you don't need to care about around this; just let it be. For the option table (default, minimum, maximum, flags): the name is the option name, keep it simple and lowercase; descriptions are short.
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1058
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:78
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:134
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1468
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2229
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:87
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:91
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, AVPacket *pkt, int stream_index)
Definition: ffplay.c:463
duration
static int64_t duration
Definition: ffplay.c:327
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63
MyAVPacketList::pkt
AVPacket * pkt
Definition: ffplay.c:109
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2797
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x in case p is NULL.
Definition: avutil.h:312
PacketQueue::pkt_list
AVFifo * pkt_list
Definition: ffplay.c:114
Frame::height
int height
Definition: ffplay.c:160
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2784
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2885
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:278
tx.h
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:234
avdevice_register_all
FF_VISIBILITY_POP_HIDDEN av_cold void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:70
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:346
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:837
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:467
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:333