ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
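/* Illustrative arithmetic only (the actual sizing is done in audio_open(),
 * further down in ffplay.c and outside this excerpt): with a 48 kHz device
 * and at most 30 callbacks per second, one callback should cover roughly
 * 48000 / 30 = 1600 samples, which is then rounded to a power of two
 * (2048 here) and never allowed below SDL_AUDIO_MIN_BUFFER_SIZE (512). */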
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if the error is too large */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
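/* Worked example of the pts_drift bookkeeping (numbers are illustrative):
 * if a frame with pts 42.0 is shown when av_gettime_relative() reports
 * 100.0 seconds, set_clock() stores pts_drift = 42.0 - 100.0 = -58.0.
 * Half a second later, with speed 1.0 and not paused, get_clock() returns
 * pts_drift + 100.5 = 42.5, so the clock advances with wall time without
 * ever being ticked explicitly. */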
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER, /* synchronize to the video clock */
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
208  int paused;
211  int seek_req;
213  int64_t seek_pos;
214  int64_t seek_rel;
217  int realtime;
218 
222 
226 
230 
232 
234 
235  double audio_clock;
237  double audio_diff_cum; /* used for AV difference average computation */
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
251  int muted;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
270  int xpos;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
279 
280  double frame_timer;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
289  int eof;
290 
291  char *filename;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
305 
307 } VideoState;
308 
309 /* options specified by the user */
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int screen_left = SDL_WINDOWPOS_CENTERED;
318 static int screen_top = SDL_WINDOWPOS_CENTERED;
319 static int audio_disable;
320 static int video_disable;
321 static int subtitle_disable;
322 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
323 static int seek_by_bytes = -1;
324 static float seek_interval = 10;
325 static int display_disable;
326 static int borderless;
327 static int startup_volume = 100;
328 static int show_status = 1;
330 static int64_t start_time = AV_NOPTS_VALUE;
331 static int64_t duration = AV_NOPTS_VALUE;
332 static int fast = 0;
333 static int genpts = 0;
334 static int lowres = 0;
335 static int decoder_reorder_pts = -1;
336 static int autoexit;
337 static int exit_on_keydown;
338 static int exit_on_mousedown;
339 static int loop = 1;
340 static int framedrop = -1;
341 static int infinite_buffer = -1;
342 static enum ShowMode show_mode = SHOW_MODE_NONE;
343 static const char *audio_codec_name;
344 static const char *subtitle_codec_name;
345 static const char *video_codec_name;
346 double rdftspeed = 0.02;
347 static int64_t cursor_last_shown;
348 static int cursor_hidden = 0;
349 #if CONFIG_AVFILTER
350 static const char **vfilters_list = NULL;
351 static int nb_vfilters = 0;
352 static char *afilters = NULL;
353 #endif
354 static int autorotate = 1;
355 static int find_stream_info = 1;
356 
357 /* current context */
358 static int is_full_screen;
359 static int64_t audio_callback_time;
360 
361 static AVPacket flush_pkt;
362 
363 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
364 
365 static SDL_Window *window;
366 static SDL_Renderer *renderer;
367 static SDL_RendererInfo renderer_info = {0};
368 static SDL_AudioDeviceID audio_dev;
369 
370 static const struct TextureFormatEntry {
371  enum AVPixelFormat format;
372  int texture_fmt;
373 } sdl_texture_format_map[] = {
374  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
375  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
376  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
377  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
378  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
379  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
380  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
381  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
382  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
383  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
384  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
385  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
386  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
387  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
388  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
389  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
390  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
391  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
392  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
393  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
394 };
395 
396 #if CONFIG_AVFILTER
397 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
398 {
399  GROW_ARRAY(vfilters_list, nb_vfilters);
400  vfilters_list[nb_vfilters - 1] = arg;
401  return 0;
402 }
403 #endif
404 
405 static inline
406 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
407  enum AVSampleFormat fmt2, int64_t channel_count2)
408 {
409  /* If channel count == 1, planar and non-planar formats are the same */
410  if (channel_count1 == 1 && channel_count2 == 1)
411  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
412  else
413  return channel_count1 != channel_count2 || fmt1 != fmt2;
414 }
415 
416 static inline
417 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
418 {
419  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
420  return channel_layout;
421  else
422  return 0;
423 }
424 
425 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
426 {
427  MyAVPacketList *pkt1;
428 
429  if (q->abort_request)
430  return -1;
431 
432  pkt1 = av_malloc(sizeof(MyAVPacketList));
433  if (!pkt1)
434  return -1;
435  pkt1->pkt = *pkt;
436  pkt1->next = NULL;
437  if (pkt == &flush_pkt)
438  q->serial++;
439  pkt1->serial = q->serial;
440 
441  if (!q->last_pkt)
442  q->first_pkt = pkt1;
443  else
444  q->last_pkt->next = pkt1;
445  q->last_pkt = pkt1;
446  q->nb_packets++;
447  q->size += pkt1->pkt.size + sizeof(*pkt1);
448  q->duration += pkt1->pkt.duration;
449  /* XXX: should duplicate packet data in DV case */
450  SDL_CondSignal(q->cond);
451  return 0;
452 }
453 
454 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
455 {
456  int ret;
457 
458  SDL_LockMutex(q->mutex);
459  ret = packet_queue_put_private(q, pkt);
460  SDL_UnlockMutex(q->mutex);
461 
462  if (pkt != &flush_pkt && ret < 0)
463  av_packet_unref(pkt);
464 
465  return ret;
466 }
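/* A minimal sketch of the producer side. The names ic, audioq and
 * audio_stream stand in for the corresponding VideoState fields; the real
 * read_thread() (not part of this excerpt) adds stream selection, seeking
 * and buffer-fullness control on top of this:
 *
 *     AVPacket pkt;
 *     while (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == audio_stream)
 *             packet_queue_put(&audioq, &pkt);   // queue takes ownership
 *         else
 *             av_packet_unref(&pkt);             // stream not used, drop it
 *     }
 *     packet_queue_put_nullpacket(&audioq, audio_stream);  // signal EOF
 */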
467 
468 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
469 {
470  AVPacket pkt1, *pkt = &pkt1;
471  av_init_packet(pkt);
472  pkt->data = NULL;
473  pkt->size = 0;
474  pkt->stream_index = stream_index;
475  return packet_queue_put(q, pkt);
476 }
477 
478 /* packet queue handling */
479 static int packet_queue_init(PacketQueue *q)
480 {
481  memset(q, 0, sizeof(PacketQueue));
482  q->mutex = SDL_CreateMutex();
483  if (!q->mutex) {
484  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
485  return AVERROR(ENOMEM);
486  }
487  q->cond = SDL_CreateCond();
488  if (!q->cond) {
489  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
490  return AVERROR(ENOMEM);
491  }
492  q->abort_request = 1;
493  return 0;
494 }
495 
496 static void packet_queue_flush(PacketQueue *q)
497 {
498  MyAVPacketList *pkt, *pkt1;
499 
500  SDL_LockMutex(q->mutex);
501  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
502  pkt1 = pkt->next;
503  av_packet_unref(&pkt->pkt);
504  av_freep(&pkt);
505  }
506  q->last_pkt = NULL;
507  q->first_pkt = NULL;
508  q->nb_packets = 0;
509  q->size = 0;
510  q->duration = 0;
511  SDL_UnlockMutex(q->mutex);
512 }
513 
514 static void packet_queue_destroy(PacketQueue *q)
515 {
516  packet_queue_flush(q);
517  SDL_DestroyMutex(q->mutex);
518  SDL_DestroyCond(q->cond);
519 }
520 
521 static void packet_queue_abort(PacketQueue *q)
522 {
523  SDL_LockMutex(q->mutex);
524 
525  q->abort_request = 1;
526 
527  SDL_CondSignal(q->cond);
528 
529  SDL_UnlockMutex(q->mutex);
530 }
531 
532 static void packet_queue_start(PacketQueue *q)
533 {
534  SDL_LockMutex(q->mutex);
535  q->abort_request = 0;
536  packet_queue_put_private(q, &flush_pkt);
537  SDL_UnlockMutex(q->mutex);
538 }
539 
540 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
541 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
542 {
543  MyAVPacketList *pkt1;
544  int ret;
545 
546  SDL_LockMutex(q->mutex);
547 
548  for (;;) {
549  if (q->abort_request) {
550  ret = -1;
551  break;
552  }
553 
554  pkt1 = q->first_pkt;
555  if (pkt1) {
556  q->first_pkt = pkt1->next;
557  if (!q->first_pkt)
558  q->last_pkt = NULL;
559  q->nb_packets--;
560  q->size -= pkt1->pkt.size + sizeof(*pkt1);
561  q->duration -= pkt1->pkt.duration;
562  *pkt = pkt1->pkt;
563  if (serial)
564  *serial = pkt1->serial;
565  av_free(pkt1);
566  ret = 1;
567  break;
568  } else if (!block) {
569  ret = 0;
570  break;
571  } else {
572  SDL_CondWait(q->cond, q->mutex);
573  }
574  }
575  SDL_UnlockMutex(q->mutex);
576  return ret;
577 }
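/* Consumer-side sketch (hypothetical loop; the real decoders go through
 * decoder_decode_frame() below). A decoder thread blocks until a packet or
 * an abort arrives and uses the returned serial to recognize packets that
 * were queued before the last flush/seek:
 *
 *     AVPacket pkt;
 *     int serial;
 *     for (;;) {
 *         if (packet_queue_get(&q, &pkt, 1, &serial) < 0)
 *             break;                    // queue aborted, stop decoding
 *         if (serial != q.serial) {     // stale packet from before a flush
 *             av_packet_unref(&pkt);
 *             continue;
 *         }
 *         // ... decode pkt ...
 *         av_packet_unref(&pkt);
 *     }
 */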
578 
579 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
580  memset(d, 0, sizeof(Decoder));
581  d->avctx = avctx;
582  d->queue = queue;
583  d->empty_queue_cond = empty_queue_cond;
584  d->start_pts = AV_NOPTS_VALUE;
585  d->pkt_serial = -1;
586 }
587 
588 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
589  int ret = AVERROR(EAGAIN);
590 
591  for (;;) {
592  AVPacket pkt;
593 
594  if (d->queue->serial == d->pkt_serial) {
595  do {
596  if (d->queue->abort_request)
597  return -1;
598 
599  switch (d->avctx->codec_type) {
600  case AVMEDIA_TYPE_VIDEO:
601  ret = avcodec_receive_frame(d->avctx, frame);
602  if (ret >= 0) {
603  if (decoder_reorder_pts == -1) {
604  frame->pts = frame->best_effort_timestamp;
605  } else if (!decoder_reorder_pts) {
606  frame->pts = frame->pkt_dts;
607  }
608  }
609  break;
610  case AVMEDIA_TYPE_AUDIO:
611  ret = avcodec_receive_frame(d->avctx, frame);
612  if (ret >= 0) {
613  AVRational tb = (AVRational){1, frame->sample_rate};
614  if (frame->pts != AV_NOPTS_VALUE)
615  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
616  else if (d->next_pts != AV_NOPTS_VALUE)
617  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
618  if (frame->pts != AV_NOPTS_VALUE) {
619  d->next_pts = frame->pts + frame->nb_samples;
620  d->next_pts_tb = tb;
621  }
622  }
623  break;
624  }
625  if (ret == AVERROR_EOF) {
626  d->finished = d->pkt_serial;
627  avcodec_flush_buffers(d->avctx);
628  return 0;
629  }
630  if (ret >= 0)
631  return 1;
632  } while (ret != AVERROR(EAGAIN));
633  }
634 
635  do {
636  if (d->queue->nb_packets == 0)
637  SDL_CondSignal(d->empty_queue_cond);
638  if (d->packet_pending) {
639  av_packet_move_ref(&pkt, &d->pkt);
640  d->packet_pending = 0;
641  } else {
642  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
643  return -1;
644  }
645  } while (d->queue->serial != d->pkt_serial);
646 
647  if (pkt.data == flush_pkt.data) {
648  avcodec_flush_buffers(d->avctx);
649  d->finished = 0;
650  d->next_pts = d->start_pts;
651  d->next_pts_tb = d->start_pts_tb;
652  } else {
653  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
654  int got_frame = 0;
655  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
656  if (ret < 0) {
657  ret = AVERROR(EAGAIN);
658  } else {
659  if (got_frame && !pkt.data) {
660  d->packet_pending = 1;
661  av_packet_move_ref(&d->pkt, &pkt);
662  }
663  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
664  }
665  } else {
666  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
667  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
668  d->packet_pending = 1;
669  av_packet_move_ref(&d->pkt, &pkt);
670  }
671  }
672  av_packet_unref(&pkt);
673  }
674  }
675 }
676 
677 static void decoder_destroy(Decoder *d) {
678  av_packet_unref(&d->pkt);
679  avcodec_free_context(&d->avctx);
680 }
681 
682 static void frame_queue_unref_item(Frame *vp)
683 {
684  av_frame_unref(vp->frame);
685  avsubtitle_free(&vp->sub);
686 }
687 
688 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
689 {
690  int i;
691  memset(f, 0, sizeof(FrameQueue));
692  if (!(f->mutex = SDL_CreateMutex())) {
693  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
694  return AVERROR(ENOMEM);
695  }
696  if (!(f->cond = SDL_CreateCond())) {
697  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
698  return AVERROR(ENOMEM);
699  }
700  f->pktq = pktq;
701  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
702  f->keep_last = !!keep_last;
703  for (i = 0; i < f->max_size; i++)
704  if (!(f->queue[i].frame = av_frame_alloc()))
705  return AVERROR(ENOMEM);
706  return 0;
707 }
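/* Typical initialization, a sketch of what stream_open() (outside this
 * excerpt) does. The picture and sample queues keep the last dequeued entry
 * accessible (keep_last = 1) so the current picture can be redrawn on
 * window events:
 *
 *     if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0 ||
 *         frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0 ||
 *         frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
 *         goto fail;
 */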
708 
709 static void frame_queue_destory(FrameQueue *f)
710 {
711  int i;
712  for (i = 0; i < f->max_size; i++) {
713  Frame *vp = &f->queue[i];
714  frame_queue_unref_item(vp);
715  av_frame_free(&vp->frame);
716  }
717  SDL_DestroyMutex(f->mutex);
718  SDL_DestroyCond(f->cond);
719 }
720 
721 static void frame_queue_signal(FrameQueue *f)
722 {
723  SDL_LockMutex(f->mutex);
724  SDL_CondSignal(f->cond);
725  SDL_UnlockMutex(f->mutex);
726 }
727 
728 static Frame *frame_queue_peek(FrameQueue *f)
729 {
730  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
731 }
732 
733 static Frame *frame_queue_peek_next(FrameQueue *f)
734 {
735  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
736 }
737 
738 static Frame *frame_queue_peek_last(FrameQueue *f)
739 {
740  return &f->queue[f->rindex];
741 }
742 
743 static Frame *frame_queue_peek_writable(FrameQueue *f)
744 {
745  /* wait until we have space to put a new frame */
746  SDL_LockMutex(f->mutex);
747  while (f->size >= f->max_size &&
748  !f->pktq->abort_request) {
749  SDL_CondWait(f->cond, f->mutex);
750  }
751  SDL_UnlockMutex(f->mutex);
752 
753  if (f->pktq->abort_request)
754  return NULL;
755 
756  return &f->queue[f->windex];
757 }
758 
759 static Frame *frame_queue_peek_readable(FrameQueue *f)
760 {
761  /* wait until we have a readable new frame */
762  SDL_LockMutex(f->mutex);
763  while (f->size - f->rindex_shown <= 0 &&
764  !f->pktq->abort_request) {
765  SDL_CondWait(f->cond, f->mutex);
766  }
767  SDL_UnlockMutex(f->mutex);
768 
769  if (f->pktq->abort_request)
770  return NULL;
771 
772  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
773 }
774 
775 static void frame_queue_push(FrameQueue *f)
776 {
777  if (++f->windex == f->max_size)
778  f->windex = 0;
779  SDL_LockMutex(f->mutex);
780  f->size++;
781  SDL_CondSignal(f->cond);
782  SDL_UnlockMutex(f->mutex);
783 }
784 
785 static void frame_queue_next(FrameQueue *f)
786 {
787  if (f->keep_last && !f->rindex_shown) {
788  f->rindex_shown = 1;
789  return;
790  }
791  frame_queue_unref_item(&f->queue[f->rindex]);
792  if (++f->rindex == f->max_size)
793  f->rindex = 0;
794  SDL_LockMutex(f->mutex);
795  f->size--;
796  SDL_CondSignal(f->cond);
797  SDL_UnlockMutex(f->mutex);
798 }
799 
800 /* return the number of undisplayed frames in the queue */
801 static int frame_queue_nb_remaining(FrameQueue *f)
802 {
803  return f->size - f->rindex_shown;
804 }
805 
806 /* return last shown position */
807 static int64_t frame_queue_last_pos(FrameQueue *f)
808 {
809  Frame *fp = &f->queue[f->rindex];
810  if (f->rindex_shown && fp->serial == f->pktq->serial)
811  return fp->pos;
812  else
813  return -1;
814 }
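/* The FrameQueue is a fixed-size ring buffer shared by exactly one writer
 * and one reader. A sketch of both sides (hypothetical variables fq and
 * decoded; in the real player the writer is a decoder thread and the reader
 * is video_refresh()):
 *
 *     // writer side
 *     Frame *vp = frame_queue_peek_writable(&fq);   // blocks while full
 *     if (!vp)
 *         return;                                   // queue was aborted
 *     av_frame_move_ref(vp->frame, decoded);
 *     frame_queue_push(&fq);
 *
 *     // reader side
 *     if (frame_queue_nb_remaining(&fq) > 0) {
 *         Frame *next = frame_queue_peek(&fq);      // next frame to show
 *         // ... display next ...
 *         frame_queue_next(&fq);                    // consume it
 *     }
 */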
815 
816 static void decoder_abort(Decoder *d, FrameQueue *fq)
817 {
818  packet_queue_abort(d->queue);
819  frame_queue_signal(fq);
820  SDL_WaitThread(d->decoder_tid, NULL);
821  d->decoder_tid = NULL;
822  packet_queue_flush(d->queue);
823 }
824 
825 static inline void fill_rectangle(int x, int y, int w, int h)
826 {
827  SDL_Rect rect;
828  rect.x = x;
829  rect.y = y;
830  rect.w = w;
831  rect.h = h;
832  if (w && h)
833  SDL_RenderFillRect(renderer, &rect);
834 }
835 
836 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
837 {
838  Uint32 format;
839  int access, w, h;
840  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
841  void *pixels;
842  int pitch;
843  if (*texture)
844  SDL_DestroyTexture(*texture);
845  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
846  return -1;
847  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
848  return -1;
849  if (init_texture) {
850  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
851  return -1;
852  memset(pixels, 0, pitch * new_height);
853  SDL_UnlockTexture(*texture);
854  }
855  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
856  }
857  return 0;
858 }
859 
860 static void calculate_display_rect(SDL_Rect *rect,
861  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
862  int pic_width, int pic_height, AVRational pic_sar)
863 {
864  float aspect_ratio;
865  int width, height, x, y;
866 
867  if (pic_sar.num == 0)
868  aspect_ratio = 0;
869  else
870  aspect_ratio = av_q2d(pic_sar);
871 
872  if (aspect_ratio <= 0.0)
873  aspect_ratio = 1.0;
874  aspect_ratio *= (float)pic_width / (float)pic_height;
875 
876  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
877  height = scr_height;
878  width = lrint(height * aspect_ratio) & ~1;
879  if (width > scr_width) {
880  width = scr_width;
881  height = lrint(width / aspect_ratio) & ~1;
882  }
883  x = (scr_width - width) / 2;
884  y = (scr_height - height) / 2;
885  rect->x = scr_xleft + x;
886  rect->y = scr_ytop + y;
887  rect->w = FFMAX(width, 1);
888  rect->h = FFMAX(height, 1);
889 }
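/* Worked example (illustrative numbers): fitting a 1920x1080 frame with a
 * 1:1 SAR into a 1280x960 area:
 *     aspect_ratio = 1920.0 / 1080.0                  ~ 1.778
 *     height = 960, width = lrint(960 * 1.778) & ~1   = 1706  (too wide)
 *     width = 1280, height = lrint(1280 / 1.778) & ~1 = 720
 *     x = (1280 - 1280) / 2 = 0, y = (960 - 720) / 2  = 120
 * i.e. the picture is letterboxed with 120-pixel bars above and below. */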
890 
891 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
892 {
893  int i;
894  *sdl_blendmode = SDL_BLENDMODE_NONE;
895  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
896  if (format == AV_PIX_FMT_RGB32 ||
897  format == AV_PIX_FMT_RGB32_1 ||
898  format == AV_PIX_FMT_BGR32 ||
899  format == AV_PIX_FMT_BGR32_1)
900  *sdl_blendmode = SDL_BLENDMODE_BLEND;
901  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
902  if (format == sdl_texture_format_map[i].format) {
903  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
904  return;
905  }
906  }
907 }
908 
909 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
910  int ret = 0;
911  Uint32 sdl_pix_fmt;
912  SDL_BlendMode sdl_blendmode;
913  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
914  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
915  return -1;
916  switch (sdl_pix_fmt) {
917  case SDL_PIXELFORMAT_UNKNOWN:
918  /* This should only happen if we are not using avfilter... */
919  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
920  frame->width, frame->height, frame->format, frame->width, frame->height,
921  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
922  if (*img_convert_ctx != NULL) {
923  uint8_t *pixels[4];
924  int pitch[4];
925  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
926  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
927  0, frame->height, pixels, pitch);
928  SDL_UnlockTexture(*tex);
929  }
930  } else {
931  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
932  ret = -1;
933  }
934  break;
935  case SDL_PIXELFORMAT_IYUV:
936  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
937  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
938  frame->data[1], frame->linesize[1],
939  frame->data[2], frame->linesize[2]);
940  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
941  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
942  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
943  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
944  } else {
945  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
946  return -1;
947  }
948  break;
949  default:
950  if (frame->linesize[0] < 0) {
951  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
952  } else {
953  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
954  }
955  break;
956  }
957  return ret;
958 }
959 
960 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
961 {
962 #if SDL_VERSION_ATLEAST(2,0,8)
963  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
964  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
965  if (frame->color_range == AVCOL_RANGE_JPEG)
966  mode = SDL_YUV_CONVERSION_JPEG;
967  else if (frame->colorspace == AVCOL_SPC_BT709)
968  mode = SDL_YUV_CONVERSION_BT709;
969  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
970  mode = SDL_YUV_CONVERSION_BT601;
971  }
972  SDL_SetYUVConversionMode(mode);
973 #endif
974 }
975 
976 static void video_image_display(VideoState *is)
977 {
978  Frame *vp;
979  Frame *sp = NULL;
980  SDL_Rect rect;
981 
982  vp = frame_queue_peek_last(&is->pictq);
983  if (is->subtitle_st) {
984  if (frame_queue_nb_remaining(&is->subpq) > 0) {
985  sp = frame_queue_peek(&is->subpq);
986 
987  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
988  if (!sp->uploaded) {
989  uint8_t* pixels[4];
990  int pitch[4];
991  int i;
992  if (!sp->width || !sp->height) {
993  sp->width = vp->width;
994  sp->height = vp->height;
995  }
996  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
997  return;
998 
999  for (i = 0; i < sp->sub.num_rects; i++) {
1000  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1001 
1002  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1003  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1004  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1005  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1006 
1007  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1008  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1009  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1010  0, NULL, NULL, NULL);
1011  if (!is->sub_convert_ctx) {
1012  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1013  return;
1014  }
1015  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1016  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1017  0, sub_rect->h, pixels, pitch);
1018  SDL_UnlockTexture(is->sub_texture);
1019  }
1020  }
1021  sp->uploaded = 1;
1022  }
1023  } else
1024  sp = NULL;
1025  }
1026  }
1027 
1028  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1029 
1030  if (!vp->uploaded) {
1031  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1032  return;
1033  vp->uploaded = 1;
1034  vp->flip_v = vp->frame->linesize[0] < 0;
1035  }
1036 
1037  set_sdl_yuv_conversion_mode(vp->frame);
1038  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1039  set_sdl_yuv_conversion_mode(NULL);
1040  if (sp) {
1041 #if USE_ONEPASS_SUBTITLE_RENDER
1042  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1043 #else
1044  int i;
1045  double xratio = (double)rect.w / (double)sp->width;
1046  double yratio = (double)rect.h / (double)sp->height;
1047  for (i = 0; i < sp->sub.num_rects; i++) {
1048  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1049  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1050  .y = rect.y + sub_rect->y * yratio,
1051  .w = sub_rect->w * xratio,
1052  .h = sub_rect->h * yratio};
1053  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1054  }
1055 #endif
1056  }
1057 }
1058 
1059 static inline int compute_mod(int a, int b)
1060 {
1061  return a < 0 ? a%b + b : a%b;
1062 }
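/* Unlike the C '%' operator, this always yields a value in [0, b):
 * compute_mod(-3, 10) == 7 whereas -3 % 10 == -3. It is used below to walk
 * the circular sample_array backwards without producing negative indices. */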
1063 
1064 static void video_audio_display(VideoState *s)
1065 {
1066  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1067  int ch, channels, h, h2;
1068  int64_t time_diff;
1069  int rdft_bits, nb_freq;
1070 
1071  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1072  ;
1073  nb_freq = 1 << (rdft_bits - 1);
1074 
1075  /* compute display index : center on currently output samples */
1076  channels = s->audio_tgt.channels;
1077  nb_display_channels = channels;
1078  if (!s->paused) {
1079  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1080  n = 2 * channels;
1081  delay = s->audio_write_buf_size;
1082  delay /= n;
1083 
1084  /* to be more precise, we take into account the time spent since
1085  the last buffer computation */
1086  if (audio_callback_time) {
1087  time_diff = av_gettime_relative() - audio_callback_time;
1088  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1089  }
1090 
1091  delay += 2 * data_used;
1092  if (delay < data_used)
1093  delay = data_used;
1094 
1095  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1096  if (s->show_mode == SHOW_MODE_WAVES) {
1097  h = INT_MIN;
1098  for (i = 0; i < 1000; i += channels) {
1099  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1100  int a = s->sample_array[idx];
1101  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1102  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1103  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1104  int score = a - d;
1105  if (h < score && (b ^ c) < 0) {
1106  h = score;
1107  i_start = idx;
1108  }
1109  }
1110  }
1111 
1112  s->last_i_start = i_start;
1113  } else {
1114  i_start = s->last_i_start;
1115  }
1116 
1117  if (s->show_mode == SHOW_MODE_WAVES) {
1118  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1119 
1120  /* total height for one channel */
1121  h = s->height / nb_display_channels;
1122  /* graph height / 2 */
1123  h2 = (h * 9) / 20;
1124  for (ch = 0; ch < nb_display_channels; ch++) {
1125  i = i_start + ch;
1126  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1127  for (x = 0; x < s->width; x++) {
1128  y = (s->sample_array[i] * h2) >> 15;
1129  if (y < 0) {
1130  y = -y;
1131  ys = y1 - y;
1132  } else {
1133  ys = y1;
1134  }
1135  fill_rectangle(s->xleft + x, ys, 1, y);
1136  i += channels;
1137  if (i >= SAMPLE_ARRAY_SIZE)
1138  i -= SAMPLE_ARRAY_SIZE;
1139  }
1140  }
1141 
1142  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1143 
1144  for (ch = 1; ch < nb_display_channels; ch++) {
1145  y = s->ytop + ch * h;
1146  fill_rectangle(s->xleft, y, s->width, 1);
1147  }
1148  } else {
1149  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1150  return;
1151 
1152  nb_display_channels= FFMIN(nb_display_channels, 2);
1153  if (rdft_bits != s->rdft_bits) {
1154  av_rdft_end(s->rdft);
1155  av_free(s->rdft_data);
1156  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1157  s->rdft_bits = rdft_bits;
1158  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1159  }
1160  if (!s->rdft || !s->rdft_data){
1161  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1162  s->show_mode = SHOW_MODE_WAVES;
1163  } else {
1164  FFTSample *data[2];
1165  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1166  uint32_t *pixels;
1167  int pitch;
1168  for (ch = 0; ch < nb_display_channels; ch++) {
1169  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1170  i = i_start + ch;
1171  for (x = 0; x < 2 * nb_freq; x++) {
1172  double w = (x-nb_freq) * (1.0 / nb_freq);
1173  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1174  i += channels;
1175  if (i >= SAMPLE_ARRAY_SIZE)
1176  i -= SAMPLE_ARRAY_SIZE;
1177  }
1178  av_rdft_calc(s->rdft, data[ch]);
1179  }
1180  /* Least efficient way to do this, we should of course
1181  * directly access it but it is more than fast enough. */
1182  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1183  pitch >>= 2;
1184  pixels += pitch * s->height;
1185  for (y = 0; y < s->height; y++) {
1186  double w = 1 / sqrt(nb_freq);
1187  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1188  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1189  : a;
1190  a = FFMIN(a, 255);
1191  b = FFMIN(b, 255);
1192  pixels -= pitch;
1193  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1194  }
1195  SDL_UnlockTexture(s->vis_texture);
1196  }
1197  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1198  }
1199  if (!s->paused)
1200  s->xpos++;
1201  if (s->xpos >= s->width)
1202  s->xpos= s->xleft;
1203  }
1204 }
1205 
1206 static void stream_component_close(VideoState *is, int stream_index)
1207 {
1208  AVFormatContext *ic = is->ic;
1209  AVCodecParameters *codecpar;
1210 
1211  if (stream_index < 0 || stream_index >= ic->nb_streams)
1212  return;
1213  codecpar = ic->streams[stream_index]->codecpar;
1214 
1215  switch (codecpar->codec_type) {
1216  case AVMEDIA_TYPE_AUDIO:
1217  decoder_abort(&is->auddec, &is->sampq);
1218  SDL_CloseAudioDevice(audio_dev);
1219  decoder_destroy(&is->auddec);
1220  swr_free(&is->swr_ctx);
1221  av_freep(&is->audio_buf1);
1222  is->audio_buf1_size = 0;
1223  is->audio_buf = NULL;
1224 
1225  if (is->rdft) {
1226  av_rdft_end(is->rdft);
1227  av_freep(&is->rdft_data);
1228  is->rdft = NULL;
1229  is->rdft_bits = 0;
1230  }
1231  break;
1232  case AVMEDIA_TYPE_VIDEO:
1233  decoder_abort(&is->viddec, &is->pictq);
1234  decoder_destroy(&is->viddec);
1235  break;
1236  case AVMEDIA_TYPE_SUBTITLE:
1237  decoder_abort(&is->subdec, &is->subpq);
1238  decoder_destroy(&is->subdec);
1239  break;
1240  default:
1241  break;
1242  }
1243 
1244  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1245  switch (codecpar->codec_type) {
1246  case AVMEDIA_TYPE_AUDIO:
1247  is->audio_st = NULL;
1248  is->audio_stream = -1;
1249  break;
1250  case AVMEDIA_TYPE_VIDEO:
1251  is->video_st = NULL;
1252  is->video_stream = -1;
1253  break;
1254  case AVMEDIA_TYPE_SUBTITLE:
1255  is->subtitle_st = NULL;
1256  is->subtitle_stream = -1;
1257  break;
1258  default:
1259  break;
1260  }
1261 }
1262 
1263 static void stream_close(VideoState *is)
1264 {
1265  /* XXX: use a special url_shutdown call to abort parse cleanly */
1266  is->abort_request = 1;
1267  SDL_WaitThread(is->read_tid, NULL);
1268 
1269  /* close each stream */
1270  if (is->audio_stream >= 0)
1271  stream_component_close(is, is->audio_stream);
1272  if (is->video_stream >= 0)
1273  stream_component_close(is, is->video_stream);
1274  if (is->subtitle_stream >= 0)
1275  stream_component_close(is, is->subtitle_stream);
1276 
1277  avformat_close_input(&is->ic);
1278 
1279  packet_queue_destroy(&is->videoq);
1280  packet_queue_destroy(&is->audioq);
1281  packet_queue_destroy(&is->subtitleq);
1282 
1283  /* free all pictures */
1284  frame_queue_destory(&is->pictq);
1285  frame_queue_destory(&is->sampq);
1286  frame_queue_destory(&is->subpq);
1287  SDL_DestroyCond(is->continue_read_thread);
1288  sws_freeContext(is->img_convert_ctx);
1289  sws_freeContext(is->sub_convert_ctx);
1290  av_free(is->filename);
1291  if (is->vis_texture)
1292  SDL_DestroyTexture(is->vis_texture);
1293  if (is->vid_texture)
1294  SDL_DestroyTexture(is->vid_texture);
1295  if (is->sub_texture)
1296  SDL_DestroyTexture(is->sub_texture);
1297  av_free(is);
1298 }
1299 
1300 static void do_exit(VideoState *is)
1301 {
1302  if (is) {
1303  stream_close(is);
1304  }
1305  if (renderer)
1306  SDL_DestroyRenderer(renderer);
1307  if (window)
1308  SDL_DestroyWindow(window);
1309  uninit_opts();
1310 #if CONFIG_AVFILTER
1311  av_freep(&vfilters_list);
1312 #endif
1313  avformat_network_deinit();
1314  if (show_status)
1315  printf("\n");
1316  SDL_Quit();
1317  av_log(NULL, AV_LOG_QUIET, "%s", "");
1318  exit(0);
1319 }
1320 
1321 static void sigterm_handler(int sig)
1322 {
1323  exit(123);
1324 }
1325 
1326 static void set_default_window_size(int width, int height, AVRational sar)
1327 {
1328  SDL_Rect rect;
1329  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1330  default_width = rect.w;
1331  default_height = rect.h;
1332 }
1333 
1334 static int video_open(VideoState *is)
1335 {
1336  int w,h;
1337 
1338  if (screen_width) {
1339  w = screen_width;
1340  h = screen_height;
1341  } else {
1342  w = default_width;
1343  h = default_height;
1344  }
1345 
1346  if (!window_title)
1347  window_title = input_filename;
1348  SDL_SetWindowTitle(window, window_title);
1349 
1350  SDL_SetWindowSize(window, w, h);
1351  SDL_SetWindowPosition(window, screen_left, screen_top);
1352  if (is_full_screen)
1353  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1354  SDL_ShowWindow(window);
1355 
1356  is->width = w;
1357  is->height = h;
1358 
1359  return 0;
1360 }
1361 
1362 /* display the current picture, if any */
1363 static void video_display(VideoState *is)
1364 {
1365  if (!is->width)
1366  video_open(is);
1367 
1368  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1369  SDL_RenderClear(renderer);
1370  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1371  video_audio_display(is);
1372  else if (is->video_st)
1373  video_image_display(is);
1374  SDL_RenderPresent(renderer);
1375 }
1376 
1377 static double get_clock(Clock *c)
1378 {
1379  if (*c->queue_serial != c->serial)
1380  return NAN;
1381  if (c->paused) {
1382  return c->pts;
1383  } else {
1384  double time = av_gettime_relative() / 1000000.0;
1385  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1386  }
1387 }
1388 
1389 static void set_clock_at(Clock *c, double pts, int serial, double time)
1390 {
1391  c->pts = pts;
1392  c->last_updated = time;
1393  c->pts_drift = c->pts - time;
1394  c->serial = serial;
1395 }
1396 
1397 static void set_clock(Clock *c, double pts, int serial)
1398 {
1399  double time = av_gettime_relative() / 1000000.0;
1400  set_clock_at(c, pts, serial, time);
1401 }
1402 
1403 static void set_clock_speed(Clock *c, double speed)
1404 {
1405  set_clock(c, get_clock(c), c->serial);
1406  c->speed = speed;
1407 }
1408 
1409 static void init_clock(Clock *c, int *queue_serial)
1410 {
1411  c->speed = 1.0;
1412  c->paused = 0;
1413  c->queue_serial = queue_serial;
1414  set_clock(c, NAN, -1);
1415 }
1416 
1417 static void sync_clock_to_slave(Clock *c, Clock *slave)
1418 {
1419  double clock = get_clock(c);
1420  double slave_clock = get_clock(slave);
1421  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1422  set_clock(c, slave_clock, slave->serial);
1423 }
1424 
1425 static int get_master_sync_type(VideoState *is) {
1426  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1427  if (is->video_st)
1428  return AV_SYNC_VIDEO_MASTER;
1429  else
1430  return AV_SYNC_AUDIO_MASTER;
1431  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1432  if (is->audio_st)
1433  return AV_SYNC_AUDIO_MASTER;
1434  else
1435  return AV_SYNC_EXTERNAL_CLOCK;
1436  } else {
1437  return AV_SYNC_EXTERNAL_CLOCK;
1438  }
1439 }
1440 
1441 /* get the current master clock value */
1442 static double get_master_clock(VideoState *is)
1443 {
1444  double val;
1445 
1446  switch (get_master_sync_type(is)) {
1447  case AV_SYNC_VIDEO_MASTER:
1448  val = get_clock(&is->vidclk);
1449  break;
1450  case AV_SYNC_AUDIO_MASTER:
1451  val = get_clock(&is->audclk);
1452  break;
1453  default:
1454  val = get_clock(&is->extclk);
1455  break;
1456  }
1457  return val;
1458 }
1459 
1460 static void check_external_clock_speed(VideoState *is) {
1461  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1462  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1463  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1464  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1465  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1466  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1467  } else {
1468  double speed = is->extclk.speed;
1469  if (speed != 1.0)
1470  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1471  }
1472 }
1473 
1474 /* seek in the stream */
1475 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1476 {
1477  if (!is->seek_req) {
1478  is->seek_pos = pos;
1479  is->seek_rel = rel;
1480  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1481  if (seek_by_bytes)
1482  is->seek_flags |= AVSEEK_FLAG_BYTE;
1483  is->seek_req = 1;
1484  SDL_CondSignal(is->continue_read_thread);
1485  }
1486 }
1487 
1488 /* pause or resume the video */
1489 static void stream_toggle_pause(VideoState *is)
1490 {
1491  if (is->paused) {
1492  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1493  if (is->read_pause_return != AVERROR(ENOSYS)) {
1494  is->vidclk.paused = 0;
1495  }
1496  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1497  }
1498  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1499  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1500 }
1501 
1502 static void toggle_pause(VideoState *is)
1503 {
1504  stream_toggle_pause(is);
1505  is->step = 0;
1506 }
1507 
1508 static void toggle_mute(VideoState *is)
1509 {
1510  is->muted = !is->muted;
1511 }
1512 
1513 static void update_volume(VideoState *is, int sign, double step)
1514 {
1515  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1516  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1517  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1518 }
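/* Worked example (SDL_MIX_MAXVOLUME is 128): with audio_volume == 64 the
 * current level is 20 * log10(64 / 128.0) ~ -6.02 dB; one volume-up step of
 * SDL_VOLUME_STEP (0.75 dB) gives -5.27 dB, so
 * new_volume = lrint(128 * pow(10, -5.27 / 20)) == 70. The av_clip()
 * fallback of audio_volume + sign guarantees the volume still moves by at
 * least one unit when the dB step rounds back to the same integer. */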
1519 
1520 static void step_to_next_frame(VideoState *is)
1521 {
1522  /* if the stream is paused unpause it, then step */
1523  if (is->paused)
1524  stream_toggle_pause(is);
1525  is->step = 1;
1526 }
1527 
1528 static double compute_target_delay(double delay, VideoState *is)
1529 {
1530  double sync_threshold, diff = 0;
1531 
1532  /* update delay to follow master synchronisation source */
1533  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1534  /* if video is slave, we try to correct big delays by
1535  duplicating or deleting a frame */
1536  diff = get_clock(&is->vidclk) - get_master_clock(is);
1537 
1538  /* skip or repeat the frame. We take the delay into account
1539  to compute the threshold. I still don't know
1540  if it is the best guess */
1541  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1542  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1543  if (diff <= -sync_threshold)
1544  delay = FFMAX(0, delay + diff);
1545  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1546  delay = delay + diff;
1547  else if (diff >= sync_threshold)
1548  delay = 2 * delay;
1549  }
1550  }
1551 
1552  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1553  delay, -diff);
1554 
1555  return delay;
1556 }
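/* Worked example (illustrative numbers): with a nominal frame delay of
 * 0.040 s and the video clock 0.060 s behind the master (diff = -0.060),
 * sync_threshold = FFMAX(0.04, FFMIN(0.1, 0.040)) = 0.040. Since
 * diff <= -sync_threshold, delay becomes FFMAX(0, 0.040 - 0.060) = 0 and
 * the next frame is shown immediately to catch up. A diff of +0.060 would
 * instead double the delay to 0.080 s so the video waits for the master. */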
1557 
1558 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1559  if (vp->serial == nextvp->serial) {
1560  double duration = nextvp->pts - vp->pts;
1561  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1562  return vp->duration;
1563  else
1564  return duration;
1565  } else {
1566  return 0.0;
1567  }
1568 }
1569 
1570 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1571  /* update current video pts */
1572  set_clock(&is->vidclk, pts, serial);
1573  sync_clock_to_slave(&is->extclk, &is->vidclk);
1574 }
1575 
1576 /* called to display each frame */
1577 static void video_refresh(void *opaque, double *remaining_time)
1578 {
1579  VideoState *is = opaque;
1580  double time;
1581 
1582  Frame *sp, *sp2;
1583 
1584  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1585  check_external_clock_speed(is);
1586 
1587  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1588  time = av_gettime_relative() / 1000000.0;
1589  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1590  video_display(is);
1591  is->last_vis_time = time;
1592  }
1593  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1594  }
1595 
1596  if (is->video_st) {
1597 retry:
1598  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1599  // nothing to do, no picture to display in the queue
1600  } else {
1601  double last_duration, duration, delay;
1602  Frame *vp, *lastvp;
1603 
1604  /* dequeue the picture */
1605  lastvp = frame_queue_peek_last(&is->pictq);
1606  vp = frame_queue_peek(&is->pictq);
1607 
1608  if (vp->serial != is->videoq.serial) {
1609  frame_queue_next(&is->pictq);
1610  goto retry;
1611  }
1612 
1613  if (lastvp->serial != vp->serial)
1614  is->frame_timer = av_gettime_relative() / 1000000.0;
1615 
1616  if (is->paused)
1617  goto display;
1618 
1619  /* compute nominal last_duration */
1620  last_duration = vp_duration(is, lastvp, vp);
1621  delay = compute_target_delay(last_duration, is);
1622 
1623  time= av_gettime_relative()/1000000.0;
1624  if (time < is->frame_timer + delay) {
1625  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1626  goto display;
1627  }
1628 
1629  is->frame_timer += delay;
1630  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1631  is->frame_timer = time;
1632 
1633  SDL_LockMutex(is->pictq.mutex);
1634  if (!isnan(vp->pts))
1635  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1636  SDL_UnlockMutex(is->pictq.mutex);
1637 
1638  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1639  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1640  duration = vp_duration(is, vp, nextvp);
1641  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1642  is->frame_drops_late++;
1643  frame_queue_next(&is->pictq);
1644  goto retry;
1645  }
1646  }
1647 
1648  if (is->subtitle_st) {
1649  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1650  sp = frame_queue_peek(&is->subpq);
1651 
1652  if (frame_queue_nb_remaining(&is->subpq) > 1)
1653  sp2 = frame_queue_peek_next(&is->subpq);
1654  else
1655  sp2 = NULL;
1656 
1657  if (sp->serial != is->subtitleq.serial
1658  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1659  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1660  {
1661  if (sp->uploaded) {
1662  int i;
1663  for (i = 0; i < sp->sub.num_rects; i++) {
1664  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1665  uint8_t *pixels;
1666  int pitch, j;
1667 
1668  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1669  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1670  memset(pixels, 0, sub_rect->w << 2);
1671  SDL_UnlockTexture(is->sub_texture);
1672  }
1673  }
1674  }
1675  frame_queue_next(&is->subpq);
1676  } else {
1677  break;
1678  }
1679  }
1680  }
1681 
1682  frame_queue_next(&is->pictq);
1683  is->force_refresh = 1;
1684 
1685  if (is->step && !is->paused)
1686  stream_toggle_pause(is);
1687  }
1688 display:
1689  /* display picture */
1690  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1691  video_display(is);
1692  }
1693  is->force_refresh = 0;
1694  if (show_status) {
1695  static int64_t last_time;
1696  int64_t cur_time;
1697  int aqsize, vqsize, sqsize;
1698  double av_diff;
1699 
1700  cur_time = av_gettime_relative();
1701  if (!last_time || (cur_time - last_time) >= 30000) {
1702  aqsize = 0;
1703  vqsize = 0;
1704  sqsize = 0;
1705  if (is->audio_st)
1706  aqsize = is->audioq.size;
1707  if (is->video_st)
1708  vqsize = is->videoq.size;
1709  if (is->subtitle_st)
1710  sqsize = is->subtitleq.size;
1711  av_diff = 0;
1712  if (is->audio_st && is->video_st)
1713  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1714  else if (is->video_st)
1715  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1716  else if (is->audio_st)
1717  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1718  av_log(NULL, AV_LOG_INFO,
1719  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1720  get_master_clock(is),
1721  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : "   ")),
1722  av_diff,
1723  is->frame_drops_early + is->frame_drops_late,
1724  aqsize / 1024,
1725  vqsize / 1024,
1726  sqsize,
1727  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1728  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1729  fflush(stdout);
1730  last_time = cur_time;
1731  }
1732  }
1733 }
1734 
1735 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1736 {
1737  Frame *vp;
1738 
1739 #if defined(DEBUG_SYNC)
1740  printf("frame_type=%c pts=%0.3f\n",
1741  av_get_picture_type_char(src_frame->pict_type), pts);
1742 #endif
1743 
1744  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1745  return -1;
1746 
1747  vp->sar = src_frame->sample_aspect_ratio;
1748  vp->uploaded = 0;
1749 
1750  vp->width = src_frame->width;
1751  vp->height = src_frame->height;
1752  vp->format = src_frame->format;
1753 
1754  vp->pts = pts;
1755  vp->duration = duration;
1756  vp->pos = pos;
1757  vp->serial = serial;
1758 
1759  set_default_window_size(vp->width, vp->height, vp->sar);
1760 
1761  av_frame_move_ref(vp->frame, src_frame);
1762  frame_queue_push(&is->pictq);
1763  return 0;
1764 }
1765 
1766 static int get_video_frame(VideoState *is, AVFrame *frame)
1767 {
1768  int got_picture;
1769 
1770  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1771  return -1;
1772 
1773  if (got_picture) {
1774  double dpts = NAN;
1775 
1776  if (frame->pts != AV_NOPTS_VALUE)
1777  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1778 
1779  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1780 
1781  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1782  if (frame->pts != AV_NOPTS_VALUE) {
1783  double diff = dpts - get_master_clock(is);
1784  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1785  diff - is->frame_last_filter_delay < 0 &&
1786  is->viddec.pkt_serial == is->vidclk.serial &&
1787  is->videoq.nb_packets) {
1788  is->frame_drops_early++;
1789  av_frame_unref(frame);
1790  got_picture = 0;
1791  }
1792  }
1793  }
1794  }
1795 
1796  return got_picture;
1797 }
1798 
1799 #if CONFIG_AVFILTER
1800 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1801  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1802 {
1803  int ret, i;
1804  int nb_filters = graph->nb_filters;
1805  AVFilterInOut *outputs = NULL, *inputs = NULL;
1806 
1807  if (filtergraph) {
1808  outputs = avfilter_inout_alloc();
1809  inputs = avfilter_inout_alloc();
1810  if (!outputs || !inputs) {
1811  ret = AVERROR(ENOMEM);
1812  goto fail;
1813  }
1814 
1815  outputs->name = av_strdup("in");
1816  outputs->filter_ctx = source_ctx;
1817  outputs->pad_idx = 0;
1818  outputs->next = NULL;
1819 
1820  inputs->name = av_strdup("out");
1821  inputs->filter_ctx = sink_ctx;
1822  inputs->pad_idx = 0;
1823  inputs->next = NULL;
1824 
1825  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1826  goto fail;
1827  } else {
1828  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1829  goto fail;
1830  }
1831 
1832  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1833  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1834  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1835 
1836  ret = avfilter_graph_config(graph, NULL);
1837 fail:
1838  avfilter_inout_free(&outputs);
1839  avfilter_inout_free(&inputs);
1840  return ret;
1841 }
1842 
1843 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1844 {
1845  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1846  char sws_flags_str[512] = "";
1847  char buffersrc_args[256];
1848  int ret;
1849  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1850  AVCodecParameters *codecpar = is->video_st->codecpar;
1851  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1852  AVDictionaryEntry *e = NULL;
1853  int nb_pix_fmts = 0;
1854  int i, j;
1855 
1856  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1857  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1858  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1859  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1860  break;
1861  }
1862  }
1863  }
1864  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1865 
1866  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1867  if (!strcmp(e->key, "sws_flags")) {
1868  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1869  } else
1870  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1871  }
1872  if (strlen(sws_flags_str))
1873  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1874 
1875  graph->scale_sws_opts = av_strdup(sws_flags_str);
1876 
1877  snprintf(buffersrc_args, sizeof(buffersrc_args),
1878  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1879  frame->width, frame->height, frame->format,
1880  is->video_st->time_base.num, is->video_st->time_base.den,
1881  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1882  if (fr.num && fr.den)
1883  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1884 
1885  if ((ret = avfilter_graph_create_filter(&filt_src,
1886  avfilter_get_by_name("buffer"),
1887  "ffplay_buffer", buffersrc_args, NULL,
1888  graph)) < 0)
1889  goto fail;
1890 
1891  ret = avfilter_graph_create_filter(&filt_out,
1892  avfilter_get_by_name("buffersink"),
1893  "ffplay_buffersink", NULL, NULL, graph);
1894  if (ret < 0)
1895  goto fail;
1896 
1897  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1898  goto fail;
1899 
1900  last_filter = filt_out;
1901 
1902 /* Note: this macro adds a filter before the most recently added filter, so the
1903  * filters are processed in reverse order of insertion */
1904 #define INSERT_FILT(name, arg) do { \
1905  AVFilterContext *filt_ctx; \
1906  \
1907  ret = avfilter_graph_create_filter(&filt_ctx, \
1908  avfilter_get_by_name(name), \
1909  "ffplay_" name, arg, NULL, graph); \
1910  if (ret < 0) \
1911  goto fail; \
1912  \
1913  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1914  if (ret < 0) \
1915  goto fail; \
1916  \
1917  last_filter = filt_ctx; \
1918 } while (0)
1919 
1920  if (autorotate) {
1921  double theta = get_rotation(is->video_st);
1922 
1923  if (fabs(theta - 90) < 1.0) {
1924  INSERT_FILT("transpose", "clock");
1925  } else if (fabs(theta - 180) < 1.0) {
1926  INSERT_FILT("hflip", NULL);
1927  INSERT_FILT("vflip", NULL);
1928  } else if (fabs(theta - 270) < 1.0) {
1929  INSERT_FILT("transpose", "cclock");
1930  } else if (fabs(theta) > 1.0) {
1931  char rotate_buf[64];
1932  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1933  INSERT_FILT("rotate", rotate_buf);
1934  }
1935  }
1936 
1937  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1938  goto fail;
1939 
1940  is->in_video_filter = filt_src;
1941  is->out_video_filter = filt_out;
1942 
1943 fail:
1944  return ret;
1945 }
1946 
1947 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1948 {
1949  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1950  int sample_rates[2] = { 0, -1 };
1951  int64_t channel_layouts[2] = { 0, -1 };
1952  int channels[2] = { 0, -1 };
1953  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1954  char aresample_swr_opts[512] = "";
1955  AVDictionaryEntry *e = NULL;
1956  char asrc_args[256];
1957  int ret;
1958 
1959  avfilter_graph_free(&is->agraph);
1960  if (!(is->agraph = avfilter_graph_alloc()))
1961  return AVERROR(ENOMEM);
1962 
1963  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1964  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1965  if (strlen(aresample_swr_opts))
1966  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1967  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1968 
1969  ret = snprintf(asrc_args, sizeof(asrc_args),
1970  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1971  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1972  is->audio_filter_src.channels,
1973  1, is->audio_filter_src.freq);
1974  if (is->audio_filter_src.channel_layout)
1975  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1976  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1977 
1978  ret = avfilter_graph_create_filter(&filt_asrc,
1979  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1980  asrc_args, NULL, is->agraph);
1981  if (ret < 0)
1982  goto end;
1983 
1984 
1985  ret = avfilter_graph_create_filter(&filt_asink,
1986  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1987  NULL, NULL, is->agraph);
1988  if (ret < 0)
1989  goto end;
1990 
1991  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto end;
1993  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1994  goto end;
1995 
1996  if (force_output_format) {
1997  channel_layouts[0] = is->audio_tgt.channel_layout;
1998  channels [0] = is->audio_tgt.channels;
1999  sample_rates [0] = is->audio_tgt.freq;
2000  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2001  goto end;
2002  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2003  goto end;
2004  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2005  goto end;
2006  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2007  goto end;
2008  }
2009 
2010 
2011  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2012  goto end;
2013 
2014  is->in_audio_filter = filt_asrc;
2015  is->out_audio_filter = filt_asink;
2016 
2017 end:
2018  if (ret < 0)
2019  avfilter_graph_free(&is->agraph);
2020  return ret;
2021 }
2022 #endif /* CONFIG_AVFILTER */
2023 
2024 static int audio_thread(void *arg)
2025 {
2026  VideoState *is = arg;
2027  AVFrame *frame = av_frame_alloc();
2028  Frame *af;
2029 #if CONFIG_AVFILTER
2030  int last_serial = -1;
2031  int64_t dec_channel_layout;
2032  int reconfigure;
2033 #endif
2034  int got_frame = 0;
2035  AVRational tb;
2036  int ret = 0;
2037 
2038  if (!frame)
2039  return AVERROR(ENOMEM);
2040 
2041  do {
2042  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2043  goto the_end;
2044 
2045  if (got_frame) {
2046  tb = (AVRational){1, frame->sample_rate};
2047 
2048 #if CONFIG_AVFILTER
2049  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2050 
2051  reconfigure =
2052  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2053  frame->format, frame->channels) ||
2054  is->audio_filter_src.channel_layout != dec_channel_layout ||
2055  is->audio_filter_src.freq != frame->sample_rate ||
2056  is->auddec.pkt_serial != last_serial;
2057 
2058  if (reconfigure) {
2059  char buf1[1024], buf2[1024];
2060  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2061  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2062  av_log(NULL, AV_LOG_DEBUG,
2063  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2064  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2065  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2066 
2067  is->audio_filter_src.fmt = frame->format;
2068  is->audio_filter_src.channels = frame->channels;
2069  is->audio_filter_src.channel_layout = dec_channel_layout;
2070  is->audio_filter_src.freq = frame->sample_rate;
2071  last_serial = is->auddec.pkt_serial;
2072 
2073  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2074  goto the_end;
2075  }
2076 
2077  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2078  goto the_end;
2079 
2080  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2081  tb = av_buffersink_get_time_base(is->out_audio_filter);
2082 #endif
2083  if (!(af = frame_queue_peek_writable(&is->sampq)))
2084  goto the_end;
2085 
2086  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2087  af->pos = frame->pkt_pos;
2088  af->serial = is->auddec.pkt_serial;
2089  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2090 
2091  av_frame_move_ref(af->frame, frame);
2092  frame_queue_push(&is->sampq);
2093 
2094 #if CONFIG_AVFILTER
2095  if (is->audioq.serial != is->auddec.pkt_serial)
2096  break;
2097  }
2098  if (ret == AVERROR_EOF)
2099  is->auddec.finished = is->auddec.pkt_serial;
2100 #endif
2101  }
2102  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2103  the_end:
2104 #if CONFIG_AVFILTER
2105  avfilter_graph_free(&is->agraph);
2106 #endif
2107  av_frame_free(&frame);
2108  return ret;
2109 }
2110 
2111 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2112 {
2113  packet_queue_start(d->queue);
2114  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2115  if (!d->decoder_tid) {
2116  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2117  return AVERROR(ENOMEM);
2118  }
2119  return 0;
2120 }
2121 
2122 static int video_thread(void *arg)
2123 {
2124  VideoState *is = arg;
2125  AVFrame *frame = av_frame_alloc();
2126  double pts;
2127  double duration;
2128  int ret;
2129  AVRational tb = is->video_st->time_base;
2130  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2131 
2132 #if CONFIG_AVFILTER
2133  AVFilterGraph *graph = avfilter_graph_alloc();
2134  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2135  int last_w = 0;
2136  int last_h = 0;
2137  enum AVPixelFormat last_format = -2;
2138  int last_serial = -1;
2139  int last_vfilter_idx = 0;
2140  if (!graph) {
2141  av_frame_free(&frame);
2142  return AVERROR(ENOMEM);
2143  }
2144 
2145 #endif
2146 
2147  if (!frame) {
2148 #if CONFIG_AVFILTER
2149  avfilter_graph_free(&graph);
2150 #endif
2151  return AVERROR(ENOMEM);
2152  }
2153 
2154  for (;;) {
2155  ret = get_video_frame(is, frame);
2156  if (ret < 0)
2157  goto the_end;
2158  if (!ret)
2159  continue;
2160 
2161 #if CONFIG_AVFILTER
2162  if ( last_w != frame->width
2163  || last_h != frame->height
2164  || last_format != frame->format
2165  || last_serial != is->viddec.pkt_serial
2166  || last_vfilter_idx != is->vfilter_idx) {
2167  av_log(NULL, AV_LOG_DEBUG,
2168  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2169  last_w, last_h,
2170  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2171  frame->width, frame->height,
2172  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2173  avfilter_graph_free(&graph);
2174  graph = avfilter_graph_alloc();
2175  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2176  SDL_Event event;
2177  event.type = FF_QUIT_EVENT;
2178  event.user.data1 = is;
2179  SDL_PushEvent(&event);
2180  goto the_end;
2181  }
2182  filt_in = is->in_video_filter;
2183  filt_out = is->out_video_filter;
2184  last_w = frame->width;
2185  last_h = frame->height;
2186  last_format = frame->format;
2187  last_serial = is->viddec.pkt_serial;
2188  last_vfilter_idx = is->vfilter_idx;
2189  frame_rate = av_buffersink_get_frame_rate(filt_out);
2190  }
2191 
2192  ret = av_buffersrc_add_frame(filt_in, frame);
2193  if (ret < 0)
2194  goto the_end;
2195 
2196  while (ret >= 0) {
2197  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2198 
2199  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2200  if (ret < 0) {
2201  if (ret == AVERROR_EOF)
2202  is->viddec.finished = is->viddec.pkt_serial;
2203  ret = 0;
2204  break;
2205  }
2206 
2207  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2208  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2209  is->frame_last_filter_delay = 0;
2210  tb = av_buffersink_get_time_base(filt_out);
2211 #endif
2212  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2213  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2214  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2215  av_frame_unref(frame);
2216 #if CONFIG_AVFILTER
2217  if (is->videoq.serial != is->viddec.pkt_serial)
2218  break;
2219  }
2220 #endif
2221 
2222  if (ret < 0)
2223  goto the_end;
2224  }
2225  the_end:
2226 #if CONFIG_AVFILTER
2227  avfilter_graph_free(&graph);
2228 #endif
2229  av_frame_free(&frame);
2230  return 0;
2231 }
2232 
2233 static int subtitle_thread(void *arg)
2234 {
2235  VideoState *is = arg;
2236  Frame *sp;
2237  int got_subtitle;
2238  double pts;
2239 
2240  for (;;) {
2241  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2242  return 0;
2243 
2244  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2245  break;
2246 
2247  pts = 0;
2248 
2249  if (got_subtitle && sp->sub.format == 0) {
2250  if (sp->sub.pts != AV_NOPTS_VALUE)
2251  pts = sp->sub.pts / (double)AV_TIME_BASE;
2252  sp->pts = pts;
2253  sp->serial = is->subdec.pkt_serial;
2254  sp->width = is->subdec.avctx->width;
2255  sp->height = is->subdec.avctx->height;
2256  sp->uploaded = 0;
2257 
2258  /* now we can update the picture count */
2259  frame_queue_push(&is->subpq);
2260  } else if (got_subtitle) {
2261  avsubtitle_free(&sp->sub);
2262  }
2263  }
2264  return 0;
2265 }
2266 
2267 /* copy samples for viewing in editor window */
2268 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2269 {
2270  int size, len;
2271 
2272  size = samples_size / sizeof(short);
2273  while (size > 0) {
2274  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2275  if (len > size)
2276  len = size;
2277  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2278  samples += len;
2279  is->sample_array_index += len;
2280  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2281  is->sample_array_index = 0;
2282  size -= len;
2283  }
2284 }
2285 
2286 /* return the wanted number of samples to get better sync if sync_type is video
2287  * or external master clock */
2288 static int synchronize_audio(VideoState *is, int nb_samples)
2289 {
2290  int wanted_nb_samples = nb_samples;
2291 
2292  /* if not master, then we try to remove or add samples to correct the clock */
2293  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2294  double diff, avg_diff;
2295  int min_nb_samples, max_nb_samples;
2296 
2297  diff = get_clock(&is->audclk) - get_master_clock(is);
2298 
2299  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2300  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2301  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2302  /* not enough measures to have a correct estimate */
2303  is->audio_diff_avg_count++;
2304  } else {
2305  /* estimate the A-V difference */
2306  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2307 
2308  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2309  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2310  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2311  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2312  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2313  }
2314  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2315  diff, avg_diff, wanted_nb_samples - nb_samples,
2316  diff, avg_diff, wanted_nb_samples - nb_samples,
2316  is->audio_clock, is->audio_diff_threshold);
2317  }
2318  } else {
2319  /* too big difference : may be initial PTS errors, so
2320  reset A-V filter */
2321  is->audio_diff_avg_count = 0;
2322  is->audio_diff_cum = 0;
2323  }
2324  }
2325 
2326  return wanted_nb_samples;
2327 }
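/* A hypothetical standalone sketch (not used anywhere in ffplay) of the sample
 * correction math above, with concrete numbers: for nb_samples = 1024 at a 48000 Hz
 * source and a measured clock difference of +0.02 s, the ideal correction would be
 * 1024 + 0.02 * 48000 = 1984 samples, but the SAMPLE_CORRECTION_PERCENT_MAX clamp
 * limits the result to 1126 samples (about +10%). */
static av_unused int example_wanted_nb_samples(int nb_samples, double diff, int freq)
{
    int wanted = nb_samples + (int)(diff * freq);
    int min_nb = nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    int max_nb = nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100;
    return av_clip(wanted, min_nb, max_nb);
}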
2328 
2329 /**
2330  * Decode one audio frame and return its uncompressed size.
2331  *
2332  * The processed audio frame is decoded, converted if required, and
2333  * stored in is->audio_buf, with size in bytes given by the return
2334  * value.
2335  */
2336 static int audio_decode_frame(VideoState *is)
2337 {
2338  int data_size, resampled_data_size;
2339  int64_t dec_channel_layout;
2340  av_unused double audio_clock0;
2341  int wanted_nb_samples;
2342  Frame *af;
2343 
2344  if (is->paused)
2345  return -1;
2346 
2347  do {
2348 #if defined(_WIN32)
2349  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2350  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2351  return -1;
2352  av_usleep (1000);
2353  }
2354 #endif
2355  if (!(af = frame_queue_peek_readable(&is->sampq)))
2356  return -1;
2357  frame_queue_next(&is->sampq);
2358  } while (af->serial != is->audioq.serial);
2359 
2360  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2361  af->frame->nb_samples,
2362  af->frame->format, 1);
2363 
2364  dec_channel_layout =
2365  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2366  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2367  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2368 
2369  if (af->frame->format != is->audio_src.fmt ||
2370  dec_channel_layout != is->audio_src.channel_layout ||
2371  af->frame->sample_rate != is->audio_src.freq ||
2372  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2373  swr_free(&is->swr_ctx);
2374  is->swr_ctx = swr_alloc_set_opts(NULL,
2375  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2376  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2377  0, NULL);
2378  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2379  av_log(NULL, AV_LOG_ERROR,
2380  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2381  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2382  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2383  swr_free(&is->swr_ctx);
2384  return -1;
2385  }
2386  is->audio_src.channel_layout = dec_channel_layout;
2387  is->audio_src.channels = af->frame->channels;
2388  is->audio_src.freq = af->frame->sample_rate;
2389  is->audio_src.fmt = af->frame->format;
2390  }
2391 
2392  if (is->swr_ctx) {
2393  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2394  uint8_t **out = &is->audio_buf1;
2395  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2396  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2397  int len2;
2398  if (out_size < 0) {
2399  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2400  return -1;
2401  }
2402  if (wanted_nb_samples != af->frame->nb_samples) {
2403  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2404  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2405  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2406  return -1;
2407  }
2408  }
2409  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2410  if (!is->audio_buf1)
2411  return AVERROR(ENOMEM);
2412  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2413  if (len2 < 0) {
2414  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2415  return -1;
2416  }
2417  if (len2 == out_count) {
2418  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2419  if (swr_init(is->swr_ctx) < 0)
2420  swr_free(&is->swr_ctx);
2421  }
2422  is->audio_buf = is->audio_buf1;
2423  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2424  } else {
2425  is->audio_buf = af->frame->data[0];
2426  resampled_data_size = data_size;
2427  }
2428 
2429  audio_clock0 = is->audio_clock;
2430  /* update the audio clock with the pts */
2431  if (!isnan(af->pts))
2432  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2433  else
2434  is->audio_clock = NAN;
2435  is->audio_clock_serial = af->serial;
2436 #ifdef DEBUG
2437  {
2438  static double last_clock;
2439  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2440  is->audio_clock - last_clock,
2441  is->audio_clock, audio_clock0);
2442  last_clock = is->audio_clock;
2443  }
2444 #endif
2445  return resampled_data_size;
2446 }
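/* Note on the resampling path above: out_count is deliberately oversized. For example,
 * converting wanted_nb_samples = 1126 samples from a 44100 Hz source to a 48000 Hz
 * output needs 1126 * 48000 / 44100 = 1225 samples (integer math), and the extra 256
 * samples of headroom absorb swr_convert() compensation and samples that were still
 * buffered inside the resampler. */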
2447 
2448 /* prepare a new audio buffer */
2449 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2450 {
2451  VideoState *is = opaque;
2452  int audio_size, len1;
2453 
2454  audio_callback_time = av_gettime_relative();
2455 
2456  while (len > 0) {
2457  if (is->audio_buf_index >= is->audio_buf_size) {
2458  audio_size = audio_decode_frame(is);
2459  if (audio_size < 0) {
2460  /* if error, just output silence */
2461  is->audio_buf = NULL;
2462  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2463  } else {
2464  if (is->show_mode != SHOW_MODE_VIDEO)
2465  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2466  is->audio_buf_size = audio_size;
2467  }
2468  is->audio_buf_index = 0;
2469  }
2470  len1 = is->audio_buf_size - is->audio_buf_index;
2471  if (len1 > len)
2472  len1 = len;
2473  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2474  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2475  else {
2476  memset(stream, 0, len1);
2477  if (!is->muted && is->audio_buf)
2478  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2479  }
2480  len -= len1;
2481  stream += len1;
2482  is->audio_buf_index += len1;
2483  }
2484  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2485  /* Let's assume the audio driver that is used by SDL has two periods. */
2486  if (!isnan(is->audio_clock)) {
2487  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2488  sync_clock_to_slave(&is->extclk, &is->audclk);
2489  }
2490 }
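/* Worked example for the clock update above, assuming 48 kHz stereo S16 output
 * (bytes_per_sec = 192000) and an 8192-byte SDL hardware buffer: with 4096 bytes of
 * the current audio_buf still unwritten, the data at the speaker lags the decoder by
 * roughly (2 * 8192 + 4096) / 192000 = 0.107 s, so the audio clock is set that far
 * behind is->audio_clock at the time the callback started. */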
2491 
2492 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2493 {
2494  SDL_AudioSpec wanted_spec, spec;
2495  const char *env;
2496  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2497  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2498  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2499 
2500  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2501  if (env) {
2502  wanted_nb_channels = atoi(env);
2503  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2504  }
2505  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2506  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2507  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2508  }
2509  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2510  wanted_spec.channels = wanted_nb_channels;
2511  wanted_spec.freq = wanted_sample_rate;
2512  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2513  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2514  return -1;
2515  }
2516  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2517  next_sample_rate_idx--;
2518  wanted_spec.format = AUDIO_S16SYS;
2519  wanted_spec.silence = 0;
2520  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2521  wanted_spec.callback = sdl_audio_callback;
2522  wanted_spec.userdata = opaque;
2523  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2524  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2525  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2526  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2527  if (!wanted_spec.channels) {
2528  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2529  wanted_spec.channels = wanted_nb_channels;
2530  if (!wanted_spec.freq) {
2531  av_log(NULL, AV_LOG_ERROR,
2532  "No more combinations to try, audio open failed\n");
2533  return -1;
2534  }
2535  }
2536  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2537  }
2538  if (spec.format != AUDIO_S16SYS) {
2539  av_log(NULL, AV_LOG_ERROR,
2540  "SDL advised audio format %d is not supported!\n", spec.format);
2541  return -1;
2542  }
2543  if (spec.channels != wanted_spec.channels) {
2544  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2545  if (!wanted_channel_layout) {
2546  av_log(NULL, AV_LOG_ERROR,
2547  "SDL advised channel count %d is not supported!\n", spec.channels);
2548  return -1;
2549  }
2550  }
2551 
2552  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2553  audio_hw_params->freq = spec.freq;
2554  audio_hw_params->channel_layout = wanted_channel_layout;
2555  audio_hw_params->channels = spec.channels;
2556  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2557  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2558  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2559  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2560  return -1;
2561  }
2562  return spec.size;
2563 }
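/* Two details of audio_open() worth spelling out, using a 48 kHz device as an assumed
 * example: the requested buffer size is FFMAX(512, 2 << av_log2(48000 / 30)) =
 * 2 << 10 = 2048 samples, i.e. roughly 23 callbacks per second; and whenever
 * SDL_OpenAudioDevice() rejects a configuration, next_nb_channels[] and
 * next_sample_rates[] walk through alternative channel counts and lower sample rates
 * before the function gives up. */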
2564 
2565 /* open a given stream. Return 0 if OK */
2566 static int stream_component_open(VideoState *is, int stream_index)
2567 {
2568  AVFormatContext *ic = is->ic;
2569  AVCodecContext *avctx;
2570  AVCodec *codec;
2571  const char *forced_codec_name = NULL;
2572  AVDictionary *opts = NULL;
2573  AVDictionaryEntry *t = NULL;
2574  int sample_rate, nb_channels;
2575  int64_t channel_layout;
2576  int ret = 0;
2577  int stream_lowres = lowres;
2578 
2579  if (stream_index < 0 || stream_index >= ic->nb_streams)
2580  return -1;
2581 
2582  avctx = avcodec_alloc_context3(NULL);
2583  if (!avctx)
2584  return AVERROR(ENOMEM);
2585 
2586  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2587  if (ret < 0)
2588  goto fail;
2589  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2590 
2591  codec = avcodec_find_decoder(avctx->codec_id);
2592 
2593  switch(avctx->codec_type){
2594  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2595  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2596  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2597  }
2598  if (forced_codec_name)
2599  codec = avcodec_find_decoder_by_name(forced_codec_name);
2600  if (!codec) {
2601  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2602  "No codec could be found with name '%s'\n", forced_codec_name);
2603  else av_log(NULL, AV_LOG_WARNING,
2604  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2605  ret = AVERROR(EINVAL);
2606  goto fail;
2607  }
2608 
2609  avctx->codec_id = codec->id;
2610  if (stream_lowres > codec->max_lowres) {
2611  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2612  codec->max_lowres);
2613  stream_lowres = codec->max_lowres;
2614  }
2615  avctx->lowres = stream_lowres;
2616 
2617  if (fast)
2618  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2619 
2620  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2621  if (!av_dict_get(opts, "threads", NULL, 0))
2622  av_dict_set(&opts, "threads", "auto", 0);
2623  if (stream_lowres)
2624  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2625  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2626  av_dict_set(&opts, "refcounted_frames", "1", 0);
2627  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2628  goto fail;
2629  }
2630  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2631  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2632  ret = AVERROR_OPTION_NOT_FOUND;
2633  goto fail;
2634  }
2635 
2636  is->eof = 0;
2637  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2638  switch (avctx->codec_type) {
2639  case AVMEDIA_TYPE_AUDIO:
2640 #if CONFIG_AVFILTER
2641  {
2642  AVFilterContext *sink;
2643 
2644  is->audio_filter_src.freq = avctx->sample_rate;
2645  is->audio_filter_src.channels = avctx->channels;
2646  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2647  is->audio_filter_src.fmt = avctx->sample_fmt;
2648  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2649  goto fail;
2650  sink = is->out_audio_filter;
2651  sample_rate = av_buffersink_get_sample_rate(sink);
2652  nb_channels = av_buffersink_get_channels(sink);
2653  channel_layout = av_buffersink_get_channel_layout(sink);
2654  }
2655 #else
2656  sample_rate = avctx->sample_rate;
2657  nb_channels = avctx->channels;
2658  channel_layout = avctx->channel_layout;
2659 #endif
2660 
2661  /* prepare audio output */
2662  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2663  goto fail;
2664  is->audio_hw_buf_size = ret;
2665  is->audio_src = is->audio_tgt;
2666  is->audio_buf_size = 0;
2667  is->audio_buf_index = 0;
2668 
2669  /* init averaging filter */
2670  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2671  is->audio_diff_avg_count = 0;
2672  /* since we do not have a precise enough audio FIFO fullness,
2673  we correct audio sync only if larger than this threshold */
2674  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2675 
2676  is->audio_stream = stream_index;
2677  is->audio_st = ic->streams[stream_index];
2678 
2679  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2680  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2681  is->auddec.start_pts = is->audio_st->start_time;
2682  is->auddec.start_pts_tb = is->audio_st->time_base;
2683  }
2684  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2685  goto out;
2686  SDL_PauseAudioDevice(audio_dev, 0);
2687  break;
2688  case AVMEDIA_TYPE_VIDEO:
2689  is->video_stream = stream_index;
2690  is->video_st = ic->streams[stream_index];
2691 
2692  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2693  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2694  goto out;
2695  is->queue_attachments_req = 1;
2696  break;
2697  case AVMEDIA_TYPE_SUBTITLE:
2698  is->subtitle_stream = stream_index;
2699  is->subtitle_st = ic->streams[stream_index];
2700 
2701  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2702  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2703  goto out;
2704  break;
2705  default:
2706  break;
2707  }
2708  goto out;
2709 
2710 fail:
2711  avcodec_free_context(&avctx);
2712 out:
2713  av_dict_free(&opts);
2714 
2715  return ret;
2716 }
2717 
2718 static int decode_interrupt_cb(void *ctx)
2719 {
2720  VideoState *is = ctx;
2721  return is->abort_request;
2722 }
2723 
2724 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2725  return stream_id < 0 ||
2726  queue->abort_request ||
2727  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2728  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2729 }
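/* In other words: a queue counts as "full enough" once it holds more than MIN_FRAMES
 * (25) packets and, when packet durations are known, more than one second of buffered
 * stream time; attached pictures and aborted queues are always treated as satisfied
 * so they never stall the read loop. */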
2730 
2731 static int is_realtime(AVFormatContext *s)
2732 {
2733  if( !strcmp(s->iformat->name, "rtp")
2734  || !strcmp(s->iformat->name, "rtsp")
2735  || !strcmp(s->iformat->name, "sdp")
2736  )
2737  return 1;
2738 
2739  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2740  || !strncmp(s->url, "udp:", 4)
2741  )
2742  )
2743  return 1;
2744  return 0;
2745 }
2746 
2747 /* this thread gets the stream from the disk or the network */
2748 static int read_thread(void *arg)
2749 {
2750  VideoState *is = arg;
2751  AVFormatContext *ic = NULL;
2752  int err, i, ret;
2753  int st_index[AVMEDIA_TYPE_NB];
2754  AVPacket pkt1, *pkt = &pkt1;
2755  int64_t stream_start_time;
2756  int pkt_in_play_range = 0;
2757  AVDictionaryEntry *t;
2758  SDL_mutex *wait_mutex = SDL_CreateMutex();
2759  int scan_all_pmts_set = 0;
2760  int64_t pkt_ts;
2761 
2762  if (!wait_mutex) {
2763  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2764  ret = AVERROR(ENOMEM);
2765  goto fail;
2766  }
2767 
2768  memset(st_index, -1, sizeof(st_index));
2769  is->last_video_stream = is->video_stream = -1;
2770  is->last_audio_stream = is->audio_stream = -1;
2771  is->last_subtitle_stream = is->subtitle_stream = -1;
2772  is->eof = 0;
2773 
2774  ic = avformat_alloc_context();
2775  if (!ic) {
2776  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2777  ret = AVERROR(ENOMEM);
2778  goto fail;
2779  }
2780  ic->interrupt_callback.callback = decode_interrupt_cb;
2781  ic->interrupt_callback.opaque = is;
2782  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2783  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2784  scan_all_pmts_set = 1;
2785  }
2786  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2787  if (err < 0) {
2788  print_error(is->filename, err);
2789  ret = -1;
2790  goto fail;
2791  }
2792  if (scan_all_pmts_set)
2793  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2794 
2795  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2796  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2797  ret = AVERROR_OPTION_NOT_FOUND;
2798  goto fail;
2799  }
2800  is->ic = ic;
2801 
2802  if (genpts)
2803  ic->flags |= AVFMT_FLAG_GENPTS;
2804 
2805  av_format_inject_global_side_data(ic);
2806 
2807  if (find_stream_info) {
2808  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2809  int orig_nb_streams = ic->nb_streams;
2810 
2811  err = avformat_find_stream_info(ic, opts);
2812 
2813  for (i = 0; i < orig_nb_streams; i++)
2814  av_dict_free(&opts[i]);
2815  av_freep(&opts);
2816 
2817  if (err < 0) {
2818  av_log(NULL, AV_LOG_WARNING,
2819  "%s: could not find codec parameters\n", is->filename);
2820  ret = -1;
2821  goto fail;
2822  }
2823  }
2824 
2825  if (ic->pb)
2826  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2827 
2828  if (seek_by_bytes < 0)
2829  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2830 
2831  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2832 
2833  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2834  window_title = av_asprintf("%s - %s", t->value, input_filename);
2835 
2836  /* if seeking requested, we execute it */
2837  if (start_time != AV_NOPTS_VALUE) {
2838  int64_t timestamp;
2839 
2840  timestamp = start_time;
2841  /* add the stream start time */
2842  if (ic->start_time != AV_NOPTS_VALUE)
2843  timestamp += ic->start_time;
2844  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2845  if (ret < 0) {
2846  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2847  is->filename, (double)timestamp / AV_TIME_BASE);
2848  }
2849  }
2850 
2851  is->realtime = is_realtime(ic);
2852 
2853  if (show_status)
2854  av_dump_format(ic, 0, is->filename, 0);
2855 
2856  for (i = 0; i < ic->nb_streams; i++) {
2857  AVStream *st = ic->streams[i];
2858  enum AVMediaType type = st->codecpar->codec_type;
2859  st->discard = AVDISCARD_ALL;
2860  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2861  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2862  st_index[type] = i;
2863  }
2864  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2865  if (wanted_stream_spec[i] && st_index[i] == -1) {
2866  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2867  st_index[i] = INT_MAX;
2868  }
2869  }
2870 
2871  if (!video_disable)
2872  st_index[AVMEDIA_TYPE_VIDEO] =
2873  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2874  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2875  if (!audio_disable)
2876  st_index[AVMEDIA_TYPE_AUDIO] =
2877  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2878  st_index[AVMEDIA_TYPE_AUDIO],
2879  st_index[AVMEDIA_TYPE_VIDEO],
2880  NULL, 0);
2881  if (!video_disable && !subtitle_disable)
2882  st_index[AVMEDIA_TYPE_SUBTITLE] =
2883  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2884  st_index[AVMEDIA_TYPE_SUBTITLE],
2885  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2886  st_index[AVMEDIA_TYPE_AUDIO] :
2887  st_index[AVMEDIA_TYPE_VIDEO]),
2888  NULL, 0);
2889 
2890  is->show_mode = show_mode;
2891  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2892  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2893  AVCodecParameters *codecpar = st->codecpar;
2894  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2895  if (codecpar->width)
2896  set_default_window_size(codecpar->width, codecpar->height, sar);
2897  }
2898 
2899  /* open the streams */
2900  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2901  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2902  }
2903 
2904  ret = -1;
2905  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2906  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2907  }
2908  if (is->show_mode == SHOW_MODE_NONE)
2909  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2910 
2911  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2912  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2913  }
2914 
2915  if (is->video_stream < 0 && is->audio_stream < 0) {
2916  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2917  is->filename);
2918  ret = -1;
2919  goto fail;
2920  }
2921 
2922  if (infinite_buffer < 0 && is->realtime)
2923  infinite_buffer = 1;
2924 
2925  for (;;) {
2926  if (is->abort_request)
2927  break;
2928  if (is->paused != is->last_paused) {
2929  is->last_paused = is->paused;
2930  if (is->paused)
2931  is->read_pause_return = av_read_pause(ic);
2932  else
2933  av_read_play(ic);
2934  }
2935 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2936  if (is->paused &&
2937  (!strcmp(ic->iformat->name, "rtsp") ||
2938  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2939  /* wait 10 ms to avoid trying to get another packet */
2940  /* XXX: horrible */
2941  SDL_Delay(10);
2942  continue;
2943  }
2944 #endif
2945  if (is->seek_req) {
2946  int64_t seek_target = is->seek_pos;
2947  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2948  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2949 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2950 // of the seek_pos/seek_rel variables
2951 
2952  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2953  if (ret < 0) {
2954  av_log(NULL, AV_LOG_ERROR,
2955  "%s: error while seeking\n", is->ic->url);
2956  } else {
2957  if (is->audio_stream >= 0) {
2958  packet_queue_flush(&is->audioq);
2959  packet_queue_put(&is->audioq, &flush_pkt);
2960  }
2961  if (is->subtitle_stream >= 0) {
2962  packet_queue_flush(&is->subtitleq);
2963  packet_queue_put(&is->subtitleq, &flush_pkt);
2964  }
2965  if (is->video_stream >= 0) {
2966  packet_queue_flush(&is->videoq);
2967  packet_queue_put(&is->videoq, &flush_pkt);
2968  }
2969  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2970  set_clock(&is->extclk, NAN, 0);
2971  } else {
2972  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2973  }
2974  }
2975  is->seek_req = 0;
2976  is->queue_attachments_req = 1;
2977  is->eof = 0;
2978  if (is->paused)
2979  step_to_next_frame(is);
2980  }
2981  if (is->queue_attachments_req) {
2982  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2983  AVPacket copy = { 0 };
2984  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2985  goto fail;
2986  packet_queue_put(&is->videoq, &copy);
2987  packet_queue_put(&is->videoq, &copy);
2988  }
2989  is->queue_attachments_req = 0;
2990  }
2991 
2992  /* if the queues are full, no need to read more */
2993  if (infinite_buffer<1 &&
2994  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2995  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2996  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2997  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2998  /* wait 10 ms */
2999  SDL_LockMutex(wait_mutex);
3000  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3001  SDL_UnlockMutex(wait_mutex);
3002  continue;
3003  }
3004  if (!is->paused &&
3005  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3006  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3007  if (loop != 1 && (!loop || --loop)) {
3008  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3009  } else if (autoexit) {
3010  ret = AVERROR_EOF;
3011  goto fail;
3012  }
3013  }
3014  ret = av_read_frame(ic, pkt);
3015  if (ret < 0) {
3016  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3017  if (is->video_stream >= 0)
3018  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3019  if (is->audio_stream >= 0)
3020  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3021  if (is->subtitle_stream >= 0)
3022  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3023  is->eof = 1;
3024  }
3025  if (ic->pb && ic->pb->error)
3026  break;
3027  SDL_LockMutex(wait_mutex);
3028  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3029  SDL_UnlockMutex(wait_mutex);
3030  continue;
3031  } else {
3032  is->eof = 0;
3033  }
3034  /* check if packet is in play range specified by user, then queue, otherwise discard */
3035  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3036  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3037  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3038  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3039  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3040  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3041  <= ((double)duration / 1000000);
3042  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3043  packet_queue_put(&is->audioq, pkt);
3044  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3045  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3046  packet_queue_put(&is->videoq, pkt);
3047  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3048  packet_queue_put(&is->subtitleq, pkt);
3049  } else {
3050  av_packet_unref(pkt);
3051  }
3052  }
3053 
3054  ret = 0;
3055  fail:
3056  if (ic && !is->ic)
3057  avformat_close_input(&ic);
3058 
3059  if (ret != 0) {
3060  SDL_Event event;
3061 
3062  event.type = FF_QUIT_EVENT;
3063  event.user.data1 = is;
3064  SDL_PushEvent(&event);
3065  }
3066  SDL_DestroyMutex(wait_mutex);
3067  return 0;
3068 }
3069 
3070 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3071 {
3072  VideoState *is;
3073 
3074  is = av_mallocz(sizeof(VideoState));
3075  if (!is)
3076  return NULL;
3077  is->filename = av_strdup(filename);
3078  if (!is->filename)
3079  goto fail;
3080  is->iformat = iformat;
3081  is->ytop = 0;
3082  is->xleft = 0;
3083 
3084  /* start video display */
3085  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3086  goto fail;
3087  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3088  goto fail;
3089  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3090  goto fail;
3091 
3092  if (packet_queue_init(&is->videoq) < 0 ||
3093  packet_queue_init(&is->audioq) < 0 ||
3094  packet_queue_init(&is->subtitleq) < 0)
3095  goto fail;
3096 
3097  if (!(is->continue_read_thread = SDL_CreateCond())) {
3098  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3099  goto fail;
3100  }
3101 
3102  init_clock(&is->vidclk, &is->videoq.serial);
3103  init_clock(&is->audclk, &is->audioq.serial);
3104  init_clock(&is->extclk, &is->extclk.serial);
3105  is->audio_clock_serial = -1;
3106  if (startup_volume < 0)
3107  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3108  if (startup_volume > 100)
3109  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3110  startup_volume = av_clip(startup_volume, 0, 100);
3111  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3112  is->audio_volume = startup_volume;
3113  is->muted = 0;
3114  is->av_sync_type = av_sync_type;
3115  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3116  if (!is->read_tid) {
3117  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3118 fail:
3119  stream_close(is);
3120  return NULL;
3121  }
3122  return is;
3123 }
3124 
3125 static void stream_cycle_channel(VideoState *is, int codec_type)
3126 {
3127  AVFormatContext *ic = is->ic;
3128  int start_index, stream_index;
3129  int old_index;
3130  AVStream *st;
3131  AVProgram *p = NULL;
3132  int nb_streams = is->ic->nb_streams;
3133 
3134  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3135  start_index = is->last_video_stream;
3136  old_index = is->video_stream;
3137  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3138  start_index = is->last_audio_stream;
3139  old_index = is->audio_stream;
3140  } else {
3141  start_index = is->last_subtitle_stream;
3142  old_index = is->subtitle_stream;
3143  }
3144  stream_index = start_index;
3145 
3146  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3147  p = av_find_program_from_stream(is->ic, NULL, is->video_stream);
3148  if (p) {
3149  nb_streams = p->nb_stream_indexes;
3150  for (start_index = 0; start_index < nb_streams; start_index++)
3151  if (p->stream_index[start_index] == stream_index)
3152  break;
3153  if (start_index == nb_streams)
3154  start_index = -1;
3155  stream_index = start_index;
3156  }
3157  }
3158 
3159  for (;;) {
3160  if (++stream_index >= nb_streams)
3161  {
3162  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3163  {
3164  stream_index = -1;
3165  is->last_subtitle_stream = -1;
3166  goto the_end;
3167  }
3168  if (start_index == -1)
3169  return;
3170  stream_index = 0;
3171  }
3172  if (stream_index == start_index)
3173  return;
3174  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3175  if (st->codecpar->codec_type == codec_type) {
3176  /* check that parameters are OK */
3177  switch (codec_type) {
3178  case AVMEDIA_TYPE_AUDIO:
3179  if (st->codecpar->sample_rate != 0 &&
3180  st->codecpar->channels != 0)
3181  goto the_end;
3182  break;
3183  case AVMEDIA_TYPE_VIDEO:
3184  case AVMEDIA_TYPE_SUBTITLE:
3185  goto the_end;
3186  default:
3187  break;
3188  }
3189  }
3190  }
3191  the_end:
3192  if (p && stream_index != -1)
3193  stream_index = p->stream_index[stream_index];
3194  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3195  av_get_media_type_string(codec_type),
3196  old_index,
3197  stream_index);
3198 
3199  stream_component_close(is, old_index);
3200  stream_component_open(is, stream_index);
3201 }
3202 
3203 
3204 static void toggle_full_screen(VideoState *is)
3205 {
3206  is_full_screen = !is_full_screen;
3207  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3208 }
3209 
3210 static void toggle_audio_display(VideoState *is)
3211 {
3212  int next = is->show_mode;
3213  do {
3214  next = (next + 1) % SHOW_MODE_NB;
3215  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3216  if (is->show_mode != next) {
3217  is->force_refresh = 1;
3218  is->show_mode = next;
3219  }
3220 }
3221 
3222 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3223  double remaining_time = 0.0;
3224  SDL_PumpEvents();
3225  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3226  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3227  SDL_ShowCursor(0);
3228  cursor_hidden = 1;
3229  }
3230  if (remaining_time > 0.0)
3231  av_usleep((int64_t)(remaining_time * 1000000.0));
3232  remaining_time = REFRESH_RATE;
3233  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3234  video_refresh(is, &remaining_time);
3235  SDL_PumpEvents();
3236  }
3237 }
3238 
3239 static void seek_chapter(VideoState *is, int incr)
3240 {
3241  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3242  int i;
3243 
3244  if (!is->ic->nb_chapters)
3245  return;
3246 
3247  /* find the current chapter */
3248  for (i = 0; i < is->ic->nb_chapters; i++) {
3249  AVChapter *ch = is->ic->chapters[i];
3250  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3251  i--;
3252  break;
3253  }
3254  }
3255 
3256  i += incr;
3257  i = FFMAX(i, 0);
3258  if (i >= is->ic->nb_chapters)
3259  return;
3260 
3261  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3262  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3263  AV_TIME_BASE_Q), 0, 0);
3264 }
3265 
3266 /* handle an event sent by the GUI */
3267 static void event_loop(VideoState *cur_stream)
3268 {
3269  SDL_Event event;
3270  double incr, pos, frac;
3271 
3272  for (;;) {
3273  double x;
3274  refresh_loop_wait_event(cur_stream, &event);
3275  switch (event.type) {
3276  case SDL_KEYDOWN:
3277  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3278  do_exit(cur_stream);
3279  break;
3280  }
3281  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3282  if (!cur_stream->width)
3283  continue;
3284  switch (event.key.keysym.sym) {
3285  case SDLK_f:
3286  toggle_full_screen(cur_stream);
3287  cur_stream->force_refresh = 1;
3288  break;
3289  case SDLK_p:
3290  case SDLK_SPACE:
3291  toggle_pause(cur_stream);
3292  break;
3293  case SDLK_m:
3294  toggle_mute(cur_stream);
3295  break;
3296  case SDLK_KP_MULTIPLY:
3297  case SDLK_0:
3298  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3299  break;
3300  case SDLK_KP_DIVIDE:
3301  case SDLK_9:
3302  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3303  break;
3304  case SDLK_s: // S: Step to next frame
3305  step_to_next_frame(cur_stream);
3306  break;
3307  case SDLK_a:
3308  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3309  break;
3310  case SDLK_v:
3311  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3312  break;
3313  case SDLK_c:
3314  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3315  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3316  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3317  break;
3318  case SDLK_t:
3319  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3320  break;
3321  case SDLK_w:
3322 #if CONFIG_AVFILTER
3323  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3324  if (++cur_stream->vfilter_idx >= nb_vfilters)
3325  cur_stream->vfilter_idx = 0;
3326  } else {
3327  cur_stream->vfilter_idx = 0;
3328  toggle_audio_display(cur_stream);
3329  }
3330 #else
3331  toggle_audio_display(cur_stream);
3332 #endif
3333  break;
3334  case SDLK_PAGEUP:
3335  if (cur_stream->ic->nb_chapters <= 1) {
3336  incr = 600.0;
3337  goto do_seek;
3338  }
3339  seek_chapter(cur_stream, 1);
3340  break;
3341  case SDLK_PAGEDOWN:
3342  if (cur_stream->ic->nb_chapters <= 1) {
3343  incr = -600.0;
3344  goto do_seek;
3345  }
3346  seek_chapter(cur_stream, -1);
3347  break;
3348  case SDLK_LEFT:
3349  incr = seek_interval ? -seek_interval : -10.0;
3350  goto do_seek;
3351  case SDLK_RIGHT:
3352  incr = seek_interval ? seek_interval : 10.0;
3353  goto do_seek;
3354  case SDLK_UP:
3355  incr = 60.0;
3356  goto do_seek;
3357  case SDLK_DOWN:
3358  incr = -60.0;
3359  do_seek:
3360  if (seek_by_bytes) {
3361  pos = -1;
3362  if (pos < 0 && cur_stream->video_stream >= 0)
3363  pos = frame_queue_last_pos(&cur_stream->pictq);
3364  if (pos < 0 && cur_stream->audio_stream >= 0)
3365  pos = frame_queue_last_pos(&cur_stream->sampq);
3366  if (pos < 0)
3367  pos = avio_tell(cur_stream->ic->pb);
3368  if (cur_stream->ic->bit_rate)
3369  incr *= cur_stream->ic->bit_rate / 8.0;
3370  else
3371  incr *= 180000.0;
3372  pos += incr;
3373  stream_seek(cur_stream, pos, incr, 1);
3374  } else {
3375  pos = get_master_clock(cur_stream);
3376  if (isnan(pos))
3377  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3378  pos += incr;
3379  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3380  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3381  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3382  }
3383  break;
3384  default:
3385  break;
3386  }
3387  break;
3388  case SDL_MOUSEBUTTONDOWN:
3389  if (exit_on_mousedown) {
3390  do_exit(cur_stream);
3391  break;
3392  }
3393  if (event.button.button == SDL_BUTTON_LEFT) {
3394  static int64_t last_mouse_left_click = 0;
3395  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3396  toggle_full_screen(cur_stream);
3397  cur_stream->force_refresh = 1;
3398  last_mouse_left_click = 0;
3399  } else {
3400  last_mouse_left_click = av_gettime_relative();
3401  }
3402  }
3403  case SDL_MOUSEMOTION:
3404  if (cursor_hidden) {
3405  SDL_ShowCursor(1);
3406  cursor_hidden = 0;
3407  }
3408  cursor_last_shown = av_gettime_relative();
3409  if (event.type == SDL_MOUSEBUTTONDOWN) {
3410  if (event.button.button != SDL_BUTTON_RIGHT)
3411  break;
3412  x = event.button.x;
3413  } else {
3414  if (!(event.motion.state & SDL_BUTTON_RMASK))
3415  break;
3416  x = event.motion.x;
3417  }
3418  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3419  uint64_t size = avio_size(cur_stream->ic->pb);
3420  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3421  } else {
3422  int64_t ts;
3423  int ns, hh, mm, ss;
3424  int tns, thh, tmm, tss;
3425  tns = cur_stream->ic->duration / 1000000LL;
3426  thh = tns / 3600;
3427  tmm = (tns % 3600) / 60;
3428  tss = (tns % 60);
3429  frac = x / cur_stream->width;
3430  ns = frac * tns;
3431  hh = ns / 3600;
3432  mm = (ns % 3600) / 60;
3433  ss = (ns % 60);
3434  av_log(NULL, AV_LOG_INFO,
3435  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3436  hh, mm, ss, thh, tmm, tss);
3437  ts = frac * cur_stream->ic->duration;
3438  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3439  ts += cur_stream->ic->start_time;
3440  stream_seek(cur_stream, ts, 0, 0);
3441  }
3442  break;
3443  case SDL_WINDOWEVENT:
3444  switch (event.window.event) {
3445  case SDL_WINDOWEVENT_RESIZED:
3446  screen_width = cur_stream->width = event.window.data1;
3447  screen_height = cur_stream->height = event.window.data2;
3448  if (cur_stream->vis_texture) {
3449  SDL_DestroyTexture(cur_stream->vis_texture);
3450  cur_stream->vis_texture = NULL;
3451  }
3452  case SDL_WINDOWEVENT_EXPOSED:
3453  cur_stream->force_refresh = 1;
3454  }
3455  break;
3456  case SDL_QUIT:
3457  case FF_QUIT_EVENT:
3458  do_exit(cur_stream);
3459  break;
3460  default:
3461  break;
3462  }
3463  }
3464 }
3465 
3466 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3467 {
3468  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3469  return opt_default(NULL, "video_size", arg);
3470 }
3471 
3472 static int opt_width(void *optctx, const char *opt, const char *arg)
3473 {
3474  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3475  return 0;
3476 }
3477 
3478 static int opt_height(void *optctx, const char *opt, const char *arg)
3479 {
3480  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3481  return 0;
3482 }
3483 
3484 static int opt_format(void *optctx, const char *opt, const char *arg)
3485 {
3486  file_iformat = av_find_input_format(arg);
3487  if (!file_iformat) {
3488  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3489  return AVERROR(EINVAL);
3490  }
3491  return 0;
3492 }
3493 
3494 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3495 {
3496  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3497  return opt_default(NULL, "pixel_format", arg);
3498 }
3499 
3500 static int opt_sync(void *optctx, const char *opt, const char *arg)
3501 {
3502  if (!strcmp(arg, "audio"))
3503  av_sync_type = AV_SYNC_AUDIO_MASTER;
3504  else if (!strcmp(arg, "video"))
3505  av_sync_type = AV_SYNC_VIDEO_MASTER;
3506  else if (!strcmp(arg, "ext"))
3507  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3508  else {
3509  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3510  exit(1);
3511  }
3512  return 0;
3513 }
3514 
3515 static int opt_seek(void *optctx, const char *opt, const char *arg)
3516 {
3517  start_time = parse_time_or_die(opt, arg, 1);
3518  return 0;
3519 }
3520 
3521 static int opt_duration(void *optctx, const char *opt, const char *arg)
3522 {
3523  duration = parse_time_or_die(opt, arg, 1);
3524  return 0;
3525 }
3526 
3527 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3528 {
3529  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3530  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3531  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3532  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3533  return 0;
3534 }
3535 
3536 static void opt_input_file(void *optctx, const char *filename)
3537 {
3538  if (input_filename) {
3539  av_log(NULL, AV_LOG_FATAL,
3540  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3541  filename, input_filename);
3542  exit(1);
3543  }
3544  if (!strcmp(filename, "-"))
3545  filename = "pipe:";
3546  input_filename = filename;
3547 }
3548 
3549 static int opt_codec(void *optctx, const char *opt, const char *arg)
3550 {
3551  const char *spec = strchr(opt, ':');
3552  if (!spec) {
3553  av_log(NULL, AV_LOG_ERROR,
3554  "No media specifier was specified in '%s' in option '%s'\n",
3555  arg, opt);
3556  return AVERROR(EINVAL);
3557  }
3558  spec++;
3559  switch (spec[0]) {
3560  case 'a' : audio_codec_name = arg; break;
3561  case 's' : subtitle_codec_name = arg; break;
3562  case 'v' : video_codec_name = arg; break;
3563  default:
3564  av_log(NULL, AV_LOG_ERROR,
3565  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3566  return AVERROR(EINVAL);
3567  }
3568  return 0;
3569 }
3570 
3571 static int dummy;
3572 
3573 static const OptionDef options[] = {
3574  CMDUTILS_COMMON_OPTIONS
3575  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3576  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3577  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3578  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3579  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3580  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3581  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3582  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3583  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3584  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3585  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3586  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3587  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3588  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3589  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3590  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3591  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3592  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3593  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3594  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3595  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3596  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3597  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3598  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3599  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3600  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3601  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3602  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3603  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3604  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3605  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3606  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3607  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3608  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3609 #if CONFIG_AVFILTER
3610  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3611  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3612 #endif
3613  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3614  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3615  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3616  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3617  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3618  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3619  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3620  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3621  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3622  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3623  "read and decode the streams to fill missing information with heuristics" },
3624  { NULL, },
3625 };
3626 
3627 static void show_usage(void)
3628 {
3629  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3630  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3631  av_log(NULL, AV_LOG_INFO, "\n");
3632 }
3633 
3634 void show_help_default(const char *opt, const char *arg)
3635 {
3636  av_log_set_callback(log_callback_help);
3637  show_usage();
3638  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3639  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3640  printf("\n");
3641  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3642  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3643 #if !CONFIG_AVFILTER
3644  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3645 #else
3646  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3647 #endif
3648  printf("\nWhile playing:\n"
3649  "q, ESC quit\n"
3650  "f toggle full screen\n"
3651  "p, SPC pause\n"
3652  "m toggle mute\n"
3653  "9, 0 decrease and increase volume respectively\n"
3654  "/, * decrease and increase volume respectively\n"
3655  "a cycle audio channel in the current program\n"
3656  "v cycle video channel\n"
3657  "t cycle subtitle channel in the current program\n"
3658  "c cycle program\n"
3659  "w cycle video filters or show modes\n"
3660  "s activate frame-step mode\n"
3661  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3662  "down/up seek backward/forward 1 minute\n"
3663  "page down/page up seek backward/forward 10 minutes\n"
3664  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3665  "left double-click toggle full screen\n"
3666  );
3667 }
3668 
3669 /* Called from the main */
3670 int main(int argc, char **argv)
3671 {
3672  int flags;
3673  VideoState *is;
3674 
3675  init_dynload();
3676 
3677  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3678  parse_loglevel(argc, argv, options);
3679 
3680  /* register all codecs, demux and protocols */
3681 #if CONFIG_AVDEVICE
3682  avdevice_register_all();
3683 #endif
3684  avformat_network_init();
3685 
3686  init_opts();
3687 
3688  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3689  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3690 
3691  show_banner(argc, argv, options);
3692 
3693  parse_options(NULL, argc, argv, options, opt_input_file);
3694 
3695  if (!input_filename) {
3696  show_usage();
3697  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3698  av_log(NULL, AV_LOG_FATAL,
3699  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3700  exit(1);
3701  }
3702 
3703  if (display_disable) {
3704  video_disable = 1;
3705  }
3706  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3707  if (audio_disable)
3708  flags &= ~SDL_INIT_AUDIO;
3709  else {
3710  /* Try to work around an occasional ALSA buffer underflow issue when the
3711  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3712  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3713  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3714  }
3715  if (display_disable)
3716  flags &= ~SDL_INIT_VIDEO;
3717  if (SDL_Init (flags)) {
3718  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3719  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3720  exit(1);
3721  }
3722 
3723  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3724  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3725 
3726  av_init_packet(&flush_pkt);
3727  flush_pkt.data = (uint8_t *)&flush_pkt;
3728 
3729  if (!display_disable) {
3730  int flags = SDL_WINDOW_HIDDEN;
3731  if (borderless)
3732  flags |= SDL_WINDOW_BORDERLESS;
3733  else
3734  flags |= SDL_WINDOW_RESIZABLE;
3735  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3736  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3737  if (window) {
3738  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3739  if (!renderer) {
3740  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3741  renderer = SDL_CreateRenderer(window, -1, 0);
3742  }
3743  if (renderer) {
3744  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3745  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3746  }
3747  }
3748  if (!window || !renderer || !renderer_info.num_texture_formats) {
3749  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3750  do_exit(NULL);
3751  }
3752  }
3753 
3754  is = stream_open(input_filename, file_iformat);
3755  if (!is) {
3756  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3757  do_exit(NULL);
3758  }
3759 
3760  event_loop(is);
3761 
3762  /* never returns */
3763 
3764  return 0;
3765 }
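
The options[] table above follows the OptionDef convention from cmdutils.h: each entry pairs an option name and a flag mask (OPT_BOOL, HAS_ARG, OPT_EXPERT, ...) with either the address of a global variable or a func_arg callback such as opt_show_mode, and parse_options() walks the table to route command-line arguments into those destinations. The following is a minimal, self-contained sketch of that table-driven dispatch idea; the MiniOpt type, handler names, and globals are hypothetical stand-ins for illustration only, not the actual cmdutils implementation.

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-in for cmdutils' OptionDef: an option either
 * sets a bound variable or dispatches its argument to a handler callback. */
typedef struct MiniOpt {
    const char *name;
    int has_arg;                        /* 1 if the option consumes a value      */
    int *bool_dst;                      /* set to 1 when matched (if non-NULL)   */
    int (*func_arg)(const char *arg);   /* called with the value (if non-NULL)   */
} MiniOpt;

static int autoexit_flag, framedrop_flag;

static int handle_loop(const char *arg)
{
    printf("loop count = %ld\n", strtol(arg, NULL, 10));
    return 0;
}

static const MiniOpt mini_options[] = {
    { "autoexit",  0, &autoexit_flag,  NULL },
    { "framedrop", 0, &framedrop_flag, NULL },
    { "loop",      1, NULL,            handle_loop },
    { NULL, 0, NULL, NULL },
};

int main(int argc, char **argv)
{
    for (int i = 1; i < argc; i++) {
        if (argv[i][0] != '-')
            continue;                              /* non-option argument: skip */
        for (const MiniOpt *o = mini_options; o->name; o++) {
            if (strcmp(argv[i] + 1, o->name))
                continue;
            if (o->bool_dst)
                *o->bool_dst = 1;                  /* OPT_BOOL-style option */
            if (o->func_arg && o->has_arg && i + 1 < argc)
                o->func_arg(argv[++i]);            /* HAS_ARG-style option  */
            break;
        }
    }
    printf("autoexit=%d framedrop=%d\n", autoexit_flag, framedrop_flag);
    return 0;
}

Compiled standalone, running it as "./a.out -autoexit -loop 3" would set autoexit_flag and print the loop count, mirroring in miniature how ffplay's table maps flags such as -autoexit and -loop onto the globals and callbacks referenced above.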
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1580
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
AVFilterContext ** filters
Definition: avfilter.h:842
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:521
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:488
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3527
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:775
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:368
static void video_image_display(VideoState *is)
Definition: ffplay.c:976
#define NULL
Definition: coverity.c:32
Clock audclk
Definition: ffplay.c:219
const char const char void * val
Definition: avisynth_c.h:771
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:475
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:496
int width
Definition: ffplay.c:292
#define OPT_EXPERT
Definition: cmdutils.h:163
static double get_clock(Clock *c)
Definition: ffplay.c:1377
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
enum AVSampleFormat fmt
Definition: ffplay.c:138
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3478
static const struct TextureFormatEntry sdl_texture_format_map[]
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:64
static void copy(const float *p1, float *p2, const int length)
SDL_cond * cond
Definition: ffplay.c:178
static const char * format[]
Definition: af_aiir.c:330
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3094
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2492
FrameQueue pictq
Definition: ffplay.c:223
static int screen_top
Definition: ffplay.c:318
Decoder auddec
Definition: ffplay.c:227
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
AVStream * subtitle_st
Definition: ffplay.c:277
This structure describes decoded (raw) audio or video data.
Definition: frame.h:226
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:129
static SDL_Renderer * renderer
Definition: ffplay.c:366
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3842
#define SWS_BICUBIC
Definition: swscale.h:60
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1629
double rdftspeed
Definition: ffplay.c:346
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
double frame_timer
Definition: ffplay.c:280
static AVInputFormat * file_iformat
Definition: ffplay.c:310
#define OPT_VIDEO
Definition: cmdutils.h:165
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3536
double get_rotation(AVStream *st)
Definition: cmdutils.c:2175
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:498
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3484
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVRational next_pts_tb
Definition: ffplay.c:199
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1425
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
int rindex
Definition: ffplay.c:171
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:84
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:79
static int default_height
Definition: ffplay.c:314
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:282
FrameQueue sampq
Definition: ffplay.c:225
enum VideoState::ShowMode show_mode
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 ...
Definition: pixfmt.h:492
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:3976
int seek_flags
Definition: ffplay.c:212
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1014
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:708
int serial
Definition: ffplay.c:124
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:5056
channels
Definition: aptx.c:30
#define OPT_AUDIO
Definition: cmdutils.h:166
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3125
int num
Numerator.
Definition: rational.h:59
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3494
int size
Definition: avcodec.h:1446
const char * b
Definition: vf_curves.c:116
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1489
#define DEBUG
Definition: vf_framerate.c:29
MyAVPacketList * first_pkt
Definition: ffplay.c:119
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1475
int av_log2(unsigned v)
Definition: intmath.c:26
static int seek_by_bytes
Definition: ffplay.c:323
double audio_diff_cum
Definition: ffplay.c:237
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:100
AVInputFormat * iformat
Definition: ffplay.c:205
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:493
enum AVMediaType codec_type
Definition: rtp.c:37
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1473
AVCodecContext * avctx
Definition: ffplay.c:191
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
int paused
Definition: ffplay.c:208
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3549
static AVStream * video_stream
static unsigned sws_flags
Definition: ffplay.c:110
int abort_request
Definition: ffplay.c:123
AVS_VideoFrame int int int int new_height
Definition: avisynth_c.h:818
unsigned num_rects
Definition: avcodec.h:3880
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1389
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1502
int out_size
Definition: movenc.c:55
double audio_diff_threshold
Definition: ffplay.c:239
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:582
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
discard all
Definition: avcodec.h:803
int64_t channel_layout
Definition: ffplay.c:137
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
static int audio_disable
Definition: ffplay.c:319
AVStream * audio_st
Definition: ffplay.c:241
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1025
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:368
static const char * audio_codec_name
Definition: ffplay.c:343
#define fn(a)
#define OPT_FLOAT
Definition: cmdutils.h:168
int serial
Definition: ffplay.c:157
AVCodec.
Definition: avcodec.h:3424
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3239
double pts_drift
Definition: ffplay.c:145
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:215
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2079
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3892
int width
Definition: ffplay.c:161
functionally identical to above
Definition: pixfmt.h:494
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
AVStream * video_st
Definition: ffplay.c:284
Clock extclk
Definition: ffplay.c:221
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3070
void * opaque
Definition: avio.h:60
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:287
AVSubtitleRect ** rects
Definition: avcodec.h:3881
Format I/O context.
Definition: avformat.h:1351
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3210
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5079
Definition: ffplay.c:154
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:294
static int16_t block[64]
Definition: dct.c:115
int av_sync_type
Definition: ffplay.c:233
unsigned int nb_stream_indexes
Definition: avformat.h:1273
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:176
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3844
double pts
Definition: ffplay.c:158
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:238
AVRational start_pts_tb
Definition: ffplay.c:197
static int read_thread(void *arg)
Definition: ffplay.c:2748
int keep_last
Definition: ffplay.c:175
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
int rdft_bits
Definition: ffplay.c:268
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:889
int size
Definition: ffplay.c:121
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:728
static int64_t start_time
Definition: ffplay.c:330
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2197
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:99
uint8_t
static int nb_streams
Definition: ffprobe.c:276
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:545
static int default_width
Definition: ffplay.c:313
int last_video_stream
Definition: ffplay.c:304
int width
Video only.
Definition: avcodec.h:3966
int last_subtitle_stream
Definition: ffplay.c:304
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:661
#define HAS_ARG
Definition: cmdutils.h:161
int audio_hw_buf_size
Definition: ffplay.c:243
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1206
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2718
#define f(width, name)
Definition: cbs_vp9.c:255
int64_t duration
Definition: ffplay.c:122
struct SwrContext * swr_ctx
Definition: ffplay.c:257
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1463
int finished
Definition: ffplay.c:193
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3267
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:406
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:5100
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:319
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:514
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1483
static int framedrop
Definition: ffplay.c:340
SDL_Texture * vis_texture
Definition: ffplay.c:272
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:85
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1419
AVPacket pkt
Definition: ffplay.c:113
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2088
int bytes_per_sec
Definition: ffplay.c:140
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:144
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
static float seek_interval
Definition: ffplay.c:324
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static int64_t audio_callback_time
Definition: ffplay.c:359
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:417
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1482
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:541
static void sigterm_handler(int sig)
Definition: ffplay.c:1321
uint8_t * data
Definition: avcodec.h:1445
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:383
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int freq
Definition: ffplay.c:135
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:653
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:4993
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:143
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2765
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:168
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:506
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
ptrdiff_t size
Definition: opengl_enc.c:101
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3845
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:177
static int64_t duration
Definition: ffplay.c:331
AVRational sar
Definition: ffplay.c:164
unsigned int * stream_index
Definition: avformat.h:1272
#define av_log(a,...)
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:276
PacketQueue videoq
Definition: ffplay.c:285
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:607
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2136
AVDictionary * format_opts
Definition: cmdutils.c:73
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:836
static int borderless
Definition: ffplay.c:326
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1508
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:563
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4183
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3070
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3438
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4166
int audio_diff_avg_count
Definition: ffplay.c:240
int ytop
Definition: ffplay.c:292
int width
Definition: frame.h:284
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1591
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:355
int seek_req
Definition: ffplay.c:211
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:349
int(* callback)(void *)
Definition: avio.h:59
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:132
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1577
int read_pause_return
Definition: ffplay.c:215
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:471
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:291
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3843
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:785
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:67
static int decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2111
RDFTContext * rdft
Definition: ffplay.c:267
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:801
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
char * url
input or output URL.
Definition: avformat.h:1447
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:468
static int autorotate
Definition: ffplay.c:354
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:471
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4250
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1528
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3896
const char * arg
Definition: jacosubdec.c:66
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:568
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:482
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:425
AVChapter ** chapters
Definition: avformat.h:1581
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1263
static int find_stream_info
Definition: ffplay.c:355
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:357
int video_stream
Definition: ffplay.c:283
static int video_open(VideoState *is)
Definition: ffplay.c:1334
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int * queue_serial
Definition: ffplay.c:150
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1409
int xpos
Definition: ffplay.c:270
int channels
Definition: ffplay.c:136
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:891
static enum ShowMode show_mode
Definition: ffplay.c:342
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1268
#define FFMAX(a, b)
Definition: common.h:94
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:532
static const OptionDef options[]
Definition: ffplay.c:3573
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3571
#define fail()
Definition: checkasm.h:117
int8_t exp
Definition: eval.c:72
enum AVPixelFormat format
Definition: ffplay.c:371
double audio_clock
Definition: ffplay.c:235
int force_refresh
Definition: ffplay.c:207
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2240
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVDictionary * sws_dict
Definition: cmdutils.c:71
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3500
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2365
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2268
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3879
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:677
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3882
static int genpts
Definition: ffplay.c:333
static AVPacket flush_pkt
Definition: ffplay.c:361
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:404
int flip_v
Definition: ffplay.c:166
double frame_last_returned_time
Definition: ffplay.c:281
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:466
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:488
static const char * subtitle_codec_name
Definition: ffplay.c:344
static int subtitle_disable
Definition: ffplay.c:321
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:154
int max_size
Definition: ffplay.c:174
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1407
int step
Definition: ffplay.c:293
SDL_Thread * decoder_tid
Definition: ffplay.c:200
AVDictionary * opts
Definition: movenc.c:50
static SDL_Window * window
Definition: ffplay.c:365
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4259
SDL_mutex * mutex
Definition: ffplay.c:125
static av_const double hypot(double x, double y)
Definition: libm.h:366
int audio_write_buf_size
Definition: ffplay.c:249
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:118
int linesize[4]
Definition: avcodec.h:3860
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:131
int channels
number of audio channels, only used for audio.
Definition: frame.h:531
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:130
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:309
struct MyAVPacketList * next
Definition: ffplay.c:114
#define AV_CH_LAYOUT_STEREO_DOWNMIX
#define NAN
Definition: mathematics.h:64
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define FFMIN(a, b)
Definition: common.h:96
SDL_mutex * mutex
Definition: ffplay.c:177
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
int windex
Definition: ffplay.c:172
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:556
static int cursor_hidden
Definition: ffplay.c:348
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:579
#define width
AVSubtitle sub
Definition: ffplay.c:156
int width
picture width / height.
Definition: avcodec.h:1706
uint8_t w
Definition: llviddspenc.c:38
int main(int argc, char **argv)
Definition: ffplay.c:3670
int height
Definition: ffplay.c:162
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
static void show_usage(void)
Definition: ffplay.c:3627
int nb_packets
Definition: ffplay.c:120
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3472
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1766
int frame_drops_late
Definition: ffplay.c:259
struct AudioParams audio_src
Definition: ffplay.c:252
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3222
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1403
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFormatContext * ctx
Definition: movenc.c:48
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:332
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2305
int last_i_start
Definition: ffplay.c:266
uint16_t format
Definition: avcodec.h:3877
#define s(width, name)
Definition: cbs_vp9.c:257
#define OPT_INT64
Definition: cmdutils.h:170
MyAVPacketList * last_pkt
Definition: ffplay.c:119
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1520
int n
Definition: avisynth_c.h:684
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2336
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:358
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:825
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2724
static int startup_volume
Definition: ffplay.c:327
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
static const AVFilterPad inputs[]
Definition: af_acontrast.c:193
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:479
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3859
static int decoder_reorder_pts
Definition: ffplay.c:335
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1397
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:264
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1008
int paused
Definition: ffplay.c:149
static const char * input_filename
Definition: ffplay.c:311
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:842
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1135
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:512
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:738
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3634
int64_t pos
Definition: ffplay.c:160
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:304
Stream structure.
Definition: avformat.h:874
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1981
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1635
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:299
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:372
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:354
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5005
char * filename
Definition: ffplay.c:291
static int screen_height
Definition: ffplay.c:316
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3521
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:253
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int64_t next_pts
Definition: ffplay.c:198
static int autoexit
Definition: ffplay.c:336
AVFrame * frame
Definition: ffplay.c:155
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int serial
Definition: ffplay.c:148
int uploaded
Definition: ffplay.c:165
enum AVMediaType codec_type
Definition: avcodec.h:1541
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:739
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:860
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
enum AVCodecID codec_id
Definition: avcodec.h:1543
static void do_exit(VideoState *is)
Definition: ffplay.c:1300
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int sample_rate
samples per second
Definition: avcodec.h:2189
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:257
AVIOContext * pb
I/O context.
Definition: avformat.h:1393
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:910
static int loop
Definition: ffplay.c:339
int last_paused
Definition: ffplay.c:209
static int exit_on_keydown
Definition: ffplay.c:337
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
FFT functions.
main external API structure.
Definition: avcodec.h:1533
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:880
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:598
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:352
Decoder subdec
Definition: ffplay.c:229
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:960
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
double max_frame_duration
Definition: ffplay.c:286
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1050
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:314
Clock vidclk
Definition: ffplay.c:220
int x
Definition: f_ebur128.c:91
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:759
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:476
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1558
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:312
double pts
Definition: ffplay.c:144
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:733
static int audio_thread(void *arg)
Definition: ffplay.c:2024
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
static int av_sync_type
Definition: ffplay.c:329
int pkt_serial
Definition: ffplay.c:192
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:721
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
static SDL_RendererInfo renderer_info
Definition: ffplay.c:367
int sample_rate
Sample rate of the audio data.
Definition: frame.h:399
int configure_filtergraph(FilterGraph *fg)
Definition: f_ebur128.c:91
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1011
PacketQueue audioq
Definition: ffplay.c:242
int packet_pending
Definition: ffplay.c:194
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
int64_t seek_pos
Definition: ffplay.c:213
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
#define isnan(x)
Definition: libm.h:340
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:288
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:165
#define OPT_STRING
Definition: cmdutils.h:164
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1064
SDL_cond * cond
Definition: ffplay.c:126
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2505
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:588
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:798
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2731
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1460
int queue_attachments_req
Definition: ffplay.c:210
unsigned nb_filters
Definition: avfilter.h:843
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:538
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:491
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:682
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:706
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
static int screen_left
Definition: ffplay.c:317
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1768
int audio_stream
Definition: ffplay.c:231
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2529
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:144
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2566
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:371
char * name
unique name for this input/output in the list
Definition: avfilter.h:1005
static int64_t cursor_last_shown
Definition: ffplay.c:347
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:688
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3466
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:909
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:908
#define flags(name, subs,...)
Definition: cbs_av1.c:596
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1735
SDL_Texture * sub_texture
Definition: ffplay.c:273
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1456
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:240
int frame_drops_early
Definition: ffplay.c:258
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2288
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
SDL_Texture * vid_texture
Definition: ffplay.c:274
int sample_array_index
Definition: ffplay.c:265
SDL_cond * continue_read_thread
Definition: ffplay.c:306
int64_t start
Definition: avformat.h:1311
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:709
int sample_rate
Audio only.
Definition: avcodec.h:4010
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: avcodec.h:3449
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:807
#define OPT_BOOL
Definition: cmdutils.h:162
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:335
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:714
double speed
Definition: ffplay.c:147
static int exit_on_mousedown
Definition: ffplay.c:338
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
static int video_thread(void *arg)
Definition: ffplay.c:2122
#define OPT_INT
Definition: cmdutils.h:167
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:198
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1326
AVDictionary * codec_opts
Definition: cmdutils.c:73
struct AudioParams audio_tgt
Definition: ffplay.c:256
sample_rates
if(ret< 0)
Definition: vf_mcdeint.c:279
uint8_t * audio_buf
Definition: ffplay.c:244
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
int muted
Definition: ffplay.c:251
static int display_disable
Definition: ffplay.c:325
static int video_disable
Definition: ffplay.c:320
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3564
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:913
signed 16 bits
Definition: samplefmt.h:61
int audio_buf_index
Definition: ffplay.c:248
uint8_t * audio_buf1
Definition: ffplay.c:245
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3515
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:147
static int screen_width
Definition: ffplay.c:315
PacketQueue * pktq
Definition: ffplay.c:179
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:927
uint32_t start_display_time
Definition: avcodec.h:3878
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1513
FFTSample * rdft_data
Definition: ffplay.c:269
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1570
int audio_clock_serial
Definition: ffplay.c:236
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:367
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1310
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:90
PacketQueue subtitleq
Definition: ffplay.c:278
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1363
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4427
static int lowres
Definition: ffplay.c:334
int eof
Definition: ffplay.c:289
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:341
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:477
double duration
Definition: ffplay.c:159
int pixels
Definition: avisynth_c.h:429
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:87
int eof_reached
true if eof reached
Definition: avio.h:239
int len
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:353
int channels
number of audio channels
Definition: avcodec.h:2190
unsigned int audio_buf1_size
Definition: ffplay.c:247
int av_buffersink_get_channels(const AVFilterContext *ctx)
SDL_Thread * read_tid
Definition: ffplay.c:204
AVPacket pkt
Definition: ffplay.c:189
int frame_size
Definition: ffplay.c:139
void av_log_set_flags(int arg)
Definition: log.c:390
int64_t start_pts
Definition: ffplay.c:196
int abort_request
Definition: ffplay.c:206
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:816
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:454
double last_updated
Definition: ffplay.c:146
Decoder viddec
Definition: ffplay.c:228
#define lrint
Definition: tablegen.h:53
AVDictionary * swr_opts
Definition: cmdutils.c:72
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:537
int height
Definition: ffplay.c:292
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:206
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1620
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:538
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:366
int channels
Audio only.
Definition: avcodec.h:4006
An instance of a filter.
Definition: avfilter.h:338
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1444
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1466
int height
Definition: frame.h:284
FILE * out
Definition: movenc.c:54
static const char * video_codec_name
Definition: ffplay.c:345
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3093
PacketQueue * queue
Definition: ffplay.c:190
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:647
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:743
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int subtitle_thread(void *arg)
Definition: ffplay.c:2233
FrameQueue subpq
Definition: ffplay.c:224
int format
Definition: ffplay.c:163
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1021
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1442
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:173
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:358
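A sketch of how avio_feof() is commonly used in a demuxing loop to tell a clean end of stream from a hard I/O error (the function name and the decision to simply drop packets are illustrative):

#include "libavformat/avformat.h"

static int drain_packets(AVFormatContext *ic)
{
    AVPacket pkt;
    int ret;

    while ((ret = av_read_frame(ic, &pkt)) >= 0)
        av_packet_unref(&pkt);            /* a real player would queue the packet */

    if (ret == AVERROR_EOF || (ic->pb && avio_feof(ic->pb)))
        return 0;                          /* normal end of input */
    return (ic->pb && ic->pb->error) ? ic->pb->error : ret;
}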
#define FF_QUIT_EVENT
Definition: ffplay.c:363
int xleft
Definition: ffplay.c:292
#define FFSWAP(type, a, b)
Definition: common.h:99
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2362
int stream_index
Definition: avcodec.h:1447
#define OPT_INPUT
Definition: cmdutils.h:181
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:903
int subtitle_stream
Definition: ffplay.c:276
unsigned int audio_buf_size
Definition: ffplay.c:246
int64_t seek_rel
Definition: ffplay.c:214
int realtime
Definition: ffplay.c:217
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:273
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:929
static void video_display(VideoState *is)
Definition: ffplay.c:1363
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:322
static int show_status
Definition: ffplay.c:328
static int compute_mod(int a, int b)
Definition: ffplay.c:1059
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: avcodec.h:1422
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:449
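A sketch of setting a named option on any AVOptions-enabled object and handling AVERROR_OPTION_NOT_FOUND (also listed above); the wrapper name and messages are illustrative:

#include "libavutil/opt.h"
#include "libavutil/log.h"

/* "obj" can be any struct whose first member is an AVClass pointer
 * (codec context, format context, SwrContext, ...). */
static void try_set_option(void *obj, const char *name, const char *value)
{
    int ret = av_opt_set(obj, name, value, AV_OPT_SEARCH_CHILDREN);
    if (ret == AVERROR_OPTION_NOT_FOUND)
        av_log(obj, AV_LOG_WARNING, "Option '%s' not found.\n", name);
    else if (ret < 0)
        av_log(obj, AV_LOG_ERROR, "Failed to set '%s' to '%s'.\n", name, value);
}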
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2449
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
mode
Use these values in ebur128_init (or'ed).
Definition: ebur128.h:83
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:292
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1417
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3204
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1438
double last_vis_time
Definition: ffplay.c:271
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached picture.
Definition: avformat.h:956
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define av_unused
Definition: attributes.h:125
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
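A sketch of the allocate/configure/initialize sequence; the packed S16 stereo 44.1 kHz output parameters and the helper name are only an example, not ffplay's actual target format selection:

#include "libswresample/swresample.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"

/* Build a resampler from a decoded frame's layout/format/rate and
 * initialize it before the first swr_convert() call. */
static struct SwrContext *make_resampler(const AVFrame *frame)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,   /* output */
            frame->channel_layout,
            (enum AVSampleFormat)frame->format,
            frame->sample_rate,                              /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return NULL;
    }
    return swr;
}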
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
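A sketch of pushing one decoded frame into a configured graph and draining the buffer sink; filt_in, filt_out and the helper name are placeholders for the graph's endpoints:

#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"

static int filter_one_frame(AVFilterContext *filt_in, AVFilterContext *filt_out,
                            AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame(filt_in, in);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame(filt_out, out)) >= 0) {
        /* ... consume "out" (display, queue, encode), then reuse it ... */
        av_frame_unref(out);
    }
    /* EAGAIN (no more frames ready) and EOF are not errors here. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}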
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:356
uint8_t pi
Definition: audioconvert.c:56
AVFormatContext * ic
Definition: ffplay.c:216
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
int audio_volume
Definition: ffplay.c:250
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:759