FFmpeg
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavutil/bprint.h"
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
46 #include "libswscale/swscale.h"
47 #include "libavutil/opt.h"
48 #include "libavcodec/avfft.h"
50 
51 #if CONFIG_AVFILTER
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 25
69 #define EXTERNAL_CLOCK_MIN_FRAMES 2
70 #define EXTERNAL_CLOCK_MAX_FRAMES 10
71 
72 /* Minimum SDL audio buffer size, in samples. */
73 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
74 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
75 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
76 
77 /* Step size for volume control in dB */
78 #define SDL_VOLUME_STEP (0.75)
79 
80 /* no AV sync correction is done if below the minimum AV sync threshold */
81 #define AV_SYNC_THRESHOLD_MIN 0.04
82 /* AV sync correction is done if above the maximum AV sync threshold */
83 #define AV_SYNC_THRESHOLD_MAX 0.1
85 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
85 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
87 /* no AV correction is done if the error is too big */
87 #define AV_NOSYNC_THRESHOLD 10.0
88 
89 /* maximum audio speed change to get correct sync */
90 #define SAMPLE_CORRECTION_PERCENT_MAX 10
91 
92 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
93 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
94 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
95 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
96 
98 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
98 #define AUDIO_DIFF_AVG_NB 20
99 
101 /* poll for a possibly required screen refresh at least this often; should be less than 1/fps */
101 #define REFRESH_RATE 0.01
102 
104 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
104 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
105 #define SAMPLE_ARRAY_SIZE (8 * 65536)
106 
107 #define CURSOR_HIDE_DELAY 1000000
108 
109 #define USE_ONEPASS_SUBTITLE_RENDER 1
110 
111 static unsigned sws_flags = SWS_BICUBIC;
112 
113 typedef struct MyAVPacketList {
116  int serial;
118 
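/* A PacketQueue is a mutex-protected FIFO of demuxed packets. Each queue
 * carries a "serial" number that is incremented whenever a flush packet is
 * queued (i.e. on seek); every packet remembers the serial it was queued
 * with, so the decoders and clocks can detect and discard data that belongs
 * to a position from before the seek. */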
119 typedef struct PacketQueue {
122  int size;
123  int64_t duration;
125  int serial;
126  SDL_mutex *mutex;
127  SDL_cond *cond;
128 } PacketQueue;
129 
130 #define VIDEO_PICTURE_QUEUE_SIZE 3
131 #define SUBPICTURE_QUEUE_SIZE 16
132 #define SAMPLE_QUEUE_SIZE 9
133 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
134 
135 typedef struct AudioParams {
136  int freq;
137  int channels;
138  int64_t channel_layout;
142 } AudioParams;
143 
144 typedef struct Clock {
145  double pts; /* clock base */
146  double pts_drift; /* clock base minus time at which we updated the clock */
147  double last_updated;
148  double speed;
149  int serial; /* clock is based on a packet with this serial */
150  int paused;
151  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
152 } Clock;
153 
154 /* Common struct for handling all types of decoded data and allocated render buffers. */
155 typedef struct Frame {
158  int serial;
159  double pts; /* presentation timestamp for the frame */
160  double duration; /* estimated duration of the frame */
161  int64_t pos; /* byte position of the frame in the input file */
162  int width;
163  int height;
164  int format;
166  int uploaded;
167  int flip_v;
168 } Frame;
169 
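/* A FrameQueue is a fixed-size ring buffer of decoded Frames layered on top
 * of a PacketQueue. When keep_last is set, the most recently displayed frame
 * stays at rindex (tracked via rindex_shown) so it can be redrawn while the
 * reader waits for the next one; see frame_queue_peek_last() and
 * frame_queue_next() below. */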
170 typedef struct FrameQueue {
172  int rindex;
173  int windex;
174  int size;
175  int max_size;
178  SDL_mutex *mutex;
179  SDL_cond *cond;
181 } FrameQueue;
182 
183 enum {
184  AV_SYNC_AUDIO_MASTER, /* default choice */
186  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
194  int finished;
196  SDL_cond *empty_queue_cond;
197  int64_t start_pts;
199  int64_t next_pts;
201  SDL_Thread *decoder_tid;
202 } Decoder;
203 
204 typedef struct VideoState {
205  SDL_Thread *read_tid;
209  int paused;
212  int seek_req;
214  int64_t seek_pos;
215  int64_t seek_rel;
218  int realtime;
219 
223 
227 
231 
233 
235 
236  double audio_clock;
238  double audio_diff_cum; /* used for AV difference average computation */
247  unsigned int audio_buf_size; /* in bytes */
248  unsigned int audio_buf1_size;
249  int audio_buf_index; /* in bytes */
252  int muted;
254 #if CONFIG_AVFILTER
255  struct AudioParams audio_filter_src;
256 #endif
261 
262  enum ShowMode {
264  } show_mode;
271  int xpos;
273  SDL_Texture *vis_texture;
274  SDL_Texture *sub_texture;
275  SDL_Texture *vid_texture;
276 
280 
281  double frame_timer;
287  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
290  int eof;
291 
292  char *filename;
294  int step;
295 
296 #if CONFIG_AVFILTER
297  int vfilter_idx;
298  AVFilterContext *in_video_filter; // the first filter in the video chain
299  AVFilterContext *out_video_filter; // the last filter in the video chain
300  AVFilterContext *in_audio_filter; // the first filter in the audio chain
301  AVFilterContext *out_audio_filter; // the last filter in the audio chain
302  AVFilterGraph *agraph; // audio filter graph
303 #endif
304 
306 
308 } VideoState;
309 
310 /* options specified by the user */
312 static const char *input_filename;
313 static const char *window_title;
314 static int default_width = 640;
315 static int default_height = 480;
316 static int screen_width = 0;
317 static int screen_height = 0;
318 static int screen_left = SDL_WINDOWPOS_CENTERED;
319 static int screen_top = SDL_WINDOWPOS_CENTERED;
320 static int audio_disable;
321 static int video_disable;
322 static int subtitle_disable;
323 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
324 static int seek_by_bytes = -1;
325 static float seek_interval = 10;
326 static int display_disable;
327 static int borderless;
328 static int alwaysontop;
329 static int startup_volume = 100;
330 static int show_status = -1;
332 static int64_t start_time = AV_NOPTS_VALUE;
333 static int64_t duration = AV_NOPTS_VALUE;
334 static int fast = 0;
335 static int genpts = 0;
336 static int lowres = 0;
337 static int decoder_reorder_pts = -1;
338 static int autoexit;
339 static int exit_on_keydown;
340 static int exit_on_mousedown;
341 static int loop = 1;
342 static int framedrop = -1;
343 static int infinite_buffer = -1;
344 static enum ShowMode show_mode = SHOW_MODE_NONE;
345 static const char *audio_codec_name;
346 static const char *subtitle_codec_name;
347 static const char *video_codec_name;
348 double rdftspeed = 0.02;
349 static int64_t cursor_last_shown;
350 static int cursor_hidden = 0;
351 #if CONFIG_AVFILTER
352 static const char **vfilters_list = NULL;
353 static int nb_vfilters = 0;
354 static char *afilters = NULL;
355 #endif
356 static int autorotate = 1;
357 static int find_stream_info = 1;
358 static int filter_nbthreads = 0;
359 
360 /* current context */
361 static int is_full_screen;
362 static int64_t audio_callback_time;
363 
365 
366 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
367 
368 static SDL_Window *window;
369 static SDL_Renderer *renderer;
370 static SDL_RendererInfo renderer_info = {0};
371 static SDL_AudioDeviceID audio_dev;
372 
373 static const struct TextureFormatEntry {
377  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
378  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
379  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
380  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
381  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
382  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
383  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
384  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
385  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
386  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
387  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
388  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
389  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
390  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
391  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
392  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
393  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
394  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
395  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
396  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
397 };
398 
399 #if CONFIG_AVFILTER
400 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
401 {
402  GROW_ARRAY(vfilters_list, nb_vfilters);
403  vfilters_list[nb_vfilters - 1] = arg;
404  return 0;
405 }
406 #endif
407 
408 static inline
409 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
410  enum AVSampleFormat fmt2, int64_t channel_count2)
411 {
412  /* If channel count == 1, planar and non-planar formats are the same */
413  if (channel_count1 == 1 && channel_count2 == 1)
415  else
416  return channel_count1 != channel_count2 || fmt1 != fmt2;
417 }
418 
419 static inline
420 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
421 {
422  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
423  return channel_layout;
424  else
425  return 0;
426 }
427 
429 {
430  MyAVPacketList *pkt1;
431 
432  if (q->abort_request)
433  return -1;
434 
435  pkt1 = av_malloc(sizeof(MyAVPacketList));
436  if (!pkt1)
437  return -1;
438  pkt1->pkt = *pkt;
439  pkt1->next = NULL;
440  if (pkt == &flush_pkt)
441  q->serial++;
442  pkt1->serial = q->serial;
443 
444  if (!q->last_pkt)
445  q->first_pkt = pkt1;
446  else
447  q->last_pkt->next = pkt1;
448  q->last_pkt = pkt1;
449  q->nb_packets++;
450  q->size += pkt1->pkt.size + sizeof(*pkt1);
451  q->duration += pkt1->pkt.duration;
452  /* XXX: should duplicate packet data in DV case */
453  SDL_CondSignal(q->cond);
454  return 0;
455 }
456 
458 {
459  int ret;
460 
461  SDL_LockMutex(q->mutex);
463  SDL_UnlockMutex(q->mutex);
464 
465  if (pkt != &flush_pkt && ret < 0)
467 
468  return ret;
469 }
470 
471 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
472 {
473  AVPacket pkt1, *pkt = &pkt1;
475  pkt->data = NULL;
476  pkt->size = 0;
477  pkt->stream_index = stream_index;
478  return packet_queue_put(q, pkt);
479 }
480 
481 /* packet queue handling */
483 {
484  memset(q, 0, sizeof(PacketQueue));
485  q->mutex = SDL_CreateMutex();
486  if (!q->mutex) {
487  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
488  return AVERROR(ENOMEM);
489  }
490  q->cond = SDL_CreateCond();
491  if (!q->cond) {
492  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
493  return AVERROR(ENOMEM);
494  }
495  q->abort_request = 1;
496  return 0;
497 }
498 
500 {
501  MyAVPacketList *pkt, *pkt1;
502 
503  SDL_LockMutex(q->mutex);
504  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
505  pkt1 = pkt->next;
506  av_packet_unref(&pkt->pkt);
507  av_freep(&pkt);
508  }
509  q->last_pkt = NULL;
510  q->first_pkt = NULL;
511  q->nb_packets = 0;
512  q->size = 0;
513  q->duration = 0;
514  SDL_UnlockMutex(q->mutex);
515 }
516 
518 {
520  SDL_DestroyMutex(q->mutex);
521  SDL_DestroyCond(q->cond);
522 }
523 
525 {
526  SDL_LockMutex(q->mutex);
527 
528  q->abort_request = 1;
529 
530  SDL_CondSignal(q->cond);
531 
532  SDL_UnlockMutex(q->mutex);
533 }
534 
536 {
537  SDL_LockMutex(q->mutex);
538  q->abort_request = 0;
540  SDL_UnlockMutex(q->mutex);
541 }
542 
543 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
544 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
545 {
546  MyAVPacketList *pkt1;
547  int ret;
548 
549  SDL_LockMutex(q->mutex);
550 
551  for (;;) {
552  if (q->abort_request) {
553  ret = -1;
554  break;
555  }
556 
557  pkt1 = q->first_pkt;
558  if (pkt1) {
559  q->first_pkt = pkt1->next;
560  if (!q->first_pkt)
561  q->last_pkt = NULL;
562  q->nb_packets--;
563  q->size -= pkt1->pkt.size + sizeof(*pkt1);
564  q->duration -= pkt1->pkt.duration;
565  *pkt = pkt1->pkt;
566  if (serial)
567  *serial = pkt1->serial;
568  av_free(pkt1);
569  ret = 1;
570  break;
571  } else if (!block) {
572  ret = 0;
573  break;
574  } else {
575  SDL_CondWait(q->cond, q->mutex);
576  }
577  }
578  SDL_UnlockMutex(q->mutex);
579  return ret;
580 }
581 
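/* A Decoder couples an AVCodecContext with the PacketQueue that feeds it.
 * decoder_decode_frame() drains already decoded frames first, then pulls the
 * next packet whose serial matches the queue's current serial (signalling the
 * read thread via empty_queue_cond when the queue runs dry), resets its state
 * on a flush packet, and keeps a packet pending whenever the codec is not yet
 * ready to accept it (EAGAIN). */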
582 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
583  memset(d, 0, sizeof(Decoder));
584  d->avctx = avctx;
585  d->queue = queue;
586  d->empty_queue_cond = empty_queue_cond;
588  d->pkt_serial = -1;
589 }
590 
592  int ret = AVERROR(EAGAIN);
593 
594  for (;;) {
595  AVPacket pkt;
596 
597  if (d->queue->serial == d->pkt_serial) {
598  do {
599  if (d->queue->abort_request)
600  return -1;
601 
602  switch (d->avctx->codec_type) {
603  case AVMEDIA_TYPE_VIDEO:
605  if (ret >= 0) {
606  if (decoder_reorder_pts == -1) {
607  frame->pts = frame->best_effort_timestamp;
608  } else if (!decoder_reorder_pts) {
609  frame->pts = frame->pkt_dts;
610  }
611  }
612  break;
613  case AVMEDIA_TYPE_AUDIO:
615  if (ret >= 0) {
616  AVRational tb = (AVRational){1, frame->sample_rate};
617  if (frame->pts != AV_NOPTS_VALUE)
618  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
619  else if (d->next_pts != AV_NOPTS_VALUE)
620  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
621  if (frame->pts != AV_NOPTS_VALUE) {
622  d->next_pts = frame->pts + frame->nb_samples;
623  d->next_pts_tb = tb;
624  }
625  }
626  break;
627  }
628  if (ret == AVERROR_EOF) {
629  d->finished = d->pkt_serial;
631  return 0;
632  }
633  if (ret >= 0)
634  return 1;
635  } while (ret != AVERROR(EAGAIN));
636  }
637 
638  do {
639  if (d->queue->nb_packets == 0)
640  SDL_CondSignal(d->empty_queue_cond);
641  if (d->packet_pending) {
642  av_packet_move_ref(&pkt, &d->pkt);
643  d->packet_pending = 0;
644  } else {
645  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
646  return -1;
647  }
648  if (d->queue->serial == d->pkt_serial)
649  break;
651  } while (1);
652 
653  if (pkt.data == flush_pkt.data) {
655  d->finished = 0;
656  d->next_pts = d->start_pts;
657  d->next_pts_tb = d->start_pts_tb;
658  } else {
660  int got_frame = 0;
661  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
662  if (ret < 0) {
663  ret = AVERROR(EAGAIN);
664  } else {
665  if (got_frame && !pkt.data) {
666  d->packet_pending = 1;
667  av_packet_move_ref(&d->pkt, &pkt);
668  }
669  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
670  }
671  } else {
672  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
673  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
674  d->packet_pending = 1;
675  av_packet_move_ref(&d->pkt, &pkt);
676  }
677  }
679  }
680  }
681 }
682 
683 static void decoder_destroy(Decoder *d) {
684  av_packet_unref(&d->pkt);
686 }
687 
689 {
690  av_frame_unref(vp->frame);
691  avsubtitle_free(&vp->sub);
692 }
693 
694 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
695 {
696  int i;
697  memset(f, 0, sizeof(FrameQueue));
698  if (!(f->mutex = SDL_CreateMutex())) {
699  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
700  return AVERROR(ENOMEM);
701  }
702  if (!(f->cond = SDL_CreateCond())) {
703  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
704  return AVERROR(ENOMEM);
705  }
706  f->pktq = pktq;
707  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
708  f->keep_last = !!keep_last;
709  for (i = 0; i < f->max_size; i++)
710  if (!(f->queue[i].frame = av_frame_alloc()))
711  return AVERROR(ENOMEM);
712  return 0;
713 }
714 
716 {
717  int i;
718  for (i = 0; i < f->max_size; i++) {
719  Frame *vp = &f->queue[i];
721  av_frame_free(&vp->frame);
722  }
723  SDL_DestroyMutex(f->mutex);
724  SDL_DestroyCond(f->cond);
725 }
726 
728 {
729  SDL_LockMutex(f->mutex);
730  SDL_CondSignal(f->cond);
731  SDL_UnlockMutex(f->mutex);
732 }
733 
735 {
736  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
737 }
738 
740 {
741  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
742 }
743 
745 {
746  return &f->queue[f->rindex];
747 }
748 
750 {
751  /* wait until we have space to put a new frame */
752  SDL_LockMutex(f->mutex);
753  while (f->size >= f->max_size &&
754  !f->pktq->abort_request) {
755  SDL_CondWait(f->cond, f->mutex);
756  }
757  SDL_UnlockMutex(f->mutex);
758 
759  if (f->pktq->abort_request)
760  return NULL;
761 
762  return &f->queue[f->windex];
763 }
764 
766 {
767  /* wait until we have a readable new frame */
768  SDL_LockMutex(f->mutex);
769  while (f->size - f->rindex_shown <= 0 &&
770  !f->pktq->abort_request) {
771  SDL_CondWait(f->cond, f->mutex);
772  }
773  SDL_UnlockMutex(f->mutex);
774 
775  if (f->pktq->abort_request)
776  return NULL;
777 
778  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
779 }
780 
782 {
783  if (++f->windex == f->max_size)
784  f->windex = 0;
785  SDL_LockMutex(f->mutex);
786  f->size++;
787  SDL_CondSignal(f->cond);
788  SDL_UnlockMutex(f->mutex);
789 }
790 
792 {
793  if (f->keep_last && !f->rindex_shown) {
794  f->rindex_shown = 1;
795  return;
796  }
797  frame_queue_unref_item(&f->queue[f->rindex]);
798  if (++f->rindex == f->max_size)
799  f->rindex = 0;
800  SDL_LockMutex(f->mutex);
801  f->size--;
802  SDL_CondSignal(f->cond);
803  SDL_UnlockMutex(f->mutex);
804 }
805 
806 /* return the number of undisplayed frames in the queue */
808 {
809  return f->size - f->rindex_shown;
810 }
811 
812 /* return last shown position */
814 {
815  Frame *fp = &f->queue[f->rindex];
816  if (f->rindex_shown && fp->serial == f->pktq->serial)
817  return fp->pos;
818  else
819  return -1;
820 }
821 
822 static void decoder_abort(Decoder *d, FrameQueue *fq)
823 {
825  frame_queue_signal(fq);
826  SDL_WaitThread(d->decoder_tid, NULL);
827  d->decoder_tid = NULL;
829 }
830 
831 static inline void fill_rectangle(int x, int y, int w, int h)
832 {
833  SDL_Rect rect;
834  rect.x = x;
835  rect.y = y;
836  rect.w = w;
837  rect.h = h;
838  if (w && h)
839  SDL_RenderFillRect(renderer, &rect);
840 }
841 
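/* (Re)create an SDL texture only when the requested format or dimensions
 * differ from the existing one; optionally clear the new texture so stale
 * pixels are never shown before the first upload. */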
842 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
843 {
844  Uint32 format;
845  int access, w, h;
846  if (!*texture || SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
847  void *pixels;
848  int pitch;
849  if (*texture)
850  SDL_DestroyTexture(*texture);
851  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
852  return -1;
853  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
854  return -1;
855  if (init_texture) {
856  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
857  return -1;
858  memset(pixels, 0, pitch * new_height);
859  SDL_UnlockTexture(*texture);
860  }
861  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
862  }
863  return 0;
864 }
865 
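/* Compute the largest rectangle that fits into the given screen area while
 * preserving the picture's display aspect ratio (sample aspect ratio
 * multiplied by width/height). Width and height are rounded down to even
 * values (& ~1) and the rectangle is centered within the screen area. */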
866 static void calculate_display_rect(SDL_Rect *rect,
867  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
868  int pic_width, int pic_height, AVRational pic_sar)
869 {
870  AVRational aspect_ratio = pic_sar;
871  int64_t width, height, x, y;
872 
873  if (av_cmp_q(aspect_ratio, av_make_q(0, 1)) <= 0)
874  aspect_ratio = av_make_q(1, 1);
875 
876  aspect_ratio = av_mul_q(aspect_ratio, av_make_q(pic_width, pic_height));
877 
878  /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
879  height = scr_height;
880  width = av_rescale(height, aspect_ratio.num, aspect_ratio.den) & ~1;
881  if (width > scr_width) {
882  width = scr_width;
883  height = av_rescale(width, aspect_ratio.den, aspect_ratio.num) & ~1;
884  }
885  x = (scr_width - width) / 2;
886  y = (scr_height - height) / 2;
887  rect->x = scr_xleft + x;
888  rect->y = scr_ytop + y;
889  rect->w = FFMAX((int)width, 1);
890  rect->h = FFMAX((int)height, 1);
891 }
892 
893 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
894 {
895  int i;
896  *sdl_blendmode = SDL_BLENDMODE_NONE;
897  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
898  if (format == AV_PIX_FMT_RGB32 ||
902  *sdl_blendmode = SDL_BLENDMODE_BLEND;
903  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
905  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
906  return;
907  }
908  }
909 }
910 
911 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
912  int ret = 0;
913  Uint32 sdl_pix_fmt;
914  SDL_BlendMode sdl_blendmode;
915  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
916  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
917  return -1;
918  switch (sdl_pix_fmt) {
919  case SDL_PIXELFORMAT_UNKNOWN:
920  /* This should only happen if we are not using avfilter... */
921  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
922  frame->width, frame->height, frame->format, frame->width, frame->height,
924  if (*img_convert_ctx != NULL) {
925  uint8_t *pixels[4];
926  int pitch[4];
927  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
928  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
929  0, frame->height, pixels, pitch);
930  SDL_UnlockTexture(*tex);
931  }
932  } else {
933  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
934  ret = -1;
935  }
936  break;
937  case SDL_PIXELFORMAT_IYUV:
938  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
939  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
940  frame->data[1], frame->linesize[1],
941  frame->data[2], frame->linesize[2]);
942  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
943  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
944  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
945  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
946  } else {
947  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
948  return -1;
949  }
950  break;
951  default:
952  if (frame->linesize[0] < 0) {
953  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
954  } else {
955  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
956  }
957  break;
958  }
959  return ret;
960 }
961 
963 {
964 #if SDL_VERSION_ATLEAST(2,0,8)
965  SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;
966  if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
967  if (frame->color_range == AVCOL_RANGE_JPEG)
968  mode = SDL_YUV_CONVERSION_JPEG;
969  else if (frame->colorspace == AVCOL_SPC_BT709)
970  mode = SDL_YUV_CONVERSION_BT709;
971  else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M || frame->colorspace == AVCOL_SPC_SMPTE240M)
972  mode = SDL_YUV_CONVERSION_BT601;
973  }
974  SDL_SetYUVConversionMode(mode);
975 #endif
976 }
977 
979 {
980  Frame *vp;
981  Frame *sp = NULL;
982  SDL_Rect rect;
983 
984  vp = frame_queue_peek_last(&is->pictq);
985  if (is->subtitle_st) {
986  if (frame_queue_nb_remaining(&is->subpq) > 0) {
987  sp = frame_queue_peek(&is->subpq);
988 
989  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
990  if (!sp->uploaded) {
991  uint8_t* pixels[4];
992  int pitch[4];
993  int i;
994  if (!sp->width || !sp->height) {
995  sp->width = vp->width;
996  sp->height = vp->height;
997  }
998  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
999  return;
1000 
1001  for (i = 0; i < sp->sub.num_rects; i++) {
1002  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1003 
1004  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
1005  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
1006  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
1007  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
1008 
1009  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
1010  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
1011  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
1012  0, NULL, NULL, NULL);
1013  if (!is->sub_convert_ctx) {
1014  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1015  return;
1016  }
1017  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
1018  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
1019  0, sub_rect->h, pixels, pitch);
1020  SDL_UnlockTexture(is->sub_texture);
1021  }
1022  }
1023  sp->uploaded = 1;
1024  }
1025  } else
1026  sp = NULL;
1027  }
1028  }
1029 
1030  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1031 
1032  if (!vp->uploaded) {
1033  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1034  return;
1035  vp->uploaded = 1;
1036  vp->flip_v = vp->frame->linesize[0] < 0;
1037  }
1038 
1040  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1042  if (sp) {
1043 #if USE_ONEPASS_SUBTITLE_RENDER
1044  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1045 #else
1046  int i;
1047  double xratio = (double)rect.w / (double)sp->width;
1048  double yratio = (double)rect.h / (double)sp->height;
1049  for (i = 0; i < sp->sub.num_rects; i++) {
1050  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1051  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1052  .y = rect.y + sub_rect->y * yratio,
1053  .w = sub_rect->w * xratio,
1054  .h = sub_rect->h * yratio};
1055  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1056  }
1057 #endif
1058  }
1059 }
1060 
1061 static inline int compute_mod(int a, int b)
1062 {
1063  return a < 0 ? a%b + b : a%b;
1064 }
1065 
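/* Audio visualization: in SHOW_MODE_WAVES the most recent samples from
 * sample_array are drawn as a per-channel waveform; otherwise an RDFT
 * (real FFT) of windowed samples is computed and one column of the
 * spectrogram texture is painted per refresh, scrolling with xpos. */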
1067 {
1068  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1069  int ch, channels, h, h2;
1070  int64_t time_diff;
1071  int rdft_bits, nb_freq;
1072 
1073  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1074  ;
1075  nb_freq = 1 << (rdft_bits - 1);
1076 
1077  /* compute display index: center on the currently output samples */
1078  channels = s->audio_tgt.channels;
1079  nb_display_channels = channels;
1080  if (!s->paused) {
1081  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1082  n = 2 * channels;
1083  delay = s->audio_write_buf_size;
1084  delay /= n;
1085 
1086  /* to be more precise, we take into account the time spent since
1087  the last buffer computation */
1088  if (audio_callback_time) {
1089  time_diff = av_gettime_relative() - audio_callback_time;
1090  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1091  }
1092 
1093  delay += 2 * data_used;
1094  if (delay < data_used)
1095  delay = data_used;
1096 
1097  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1098  if (s->show_mode == SHOW_MODE_WAVES) {
1099  h = INT_MIN;
1100  for (i = 0; i < 1000; i += channels) {
1101  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1102  int a = s->sample_array[idx];
1103  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1104  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1105  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1106  int score = a - d;
1107  if (h < score && (b ^ c) < 0) {
1108  h = score;
1109  i_start = idx;
1110  }
1111  }
1112  }
1113 
1114  s->last_i_start = i_start;
1115  } else {
1116  i_start = s->last_i_start;
1117  }
1118 
1119  if (s->show_mode == SHOW_MODE_WAVES) {
1120  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1121 
1122  /* total height for one channel */
1123  h = s->height / nb_display_channels;
1124  /* graph height / 2 */
1125  h2 = (h * 9) / 20;
1126  for (ch = 0; ch < nb_display_channels; ch++) {
1127  i = i_start + ch;
1128  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1129  for (x = 0; x < s->width; x++) {
1130  y = (s->sample_array[i] * h2) >> 15;
1131  if (y < 0) {
1132  y = -y;
1133  ys = y1 - y;
1134  } else {
1135  ys = y1;
1136  }
1137  fill_rectangle(s->xleft + x, ys, 1, y);
1138  i += channels;
1139  if (i >= SAMPLE_ARRAY_SIZE)
1140  i -= SAMPLE_ARRAY_SIZE;
1141  }
1142  }
1143 
1144  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1145 
1146  for (ch = 1; ch < nb_display_channels; ch++) {
1147  y = s->ytop + ch * h;
1148  fill_rectangle(s->xleft, y, s->width, 1);
1149  }
1150  } else {
1151  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1152  return;
1153 
1154  if (s->xpos >= s->width)
1155  s->xpos = 0;
1156  nb_display_channels= FFMIN(nb_display_channels, 2);
1157  if (rdft_bits != s->rdft_bits) {
1158  av_rdft_end(s->rdft);
1159  av_free(s->rdft_data);
1160  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1161  s->rdft_bits = rdft_bits;
1162  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1163  }
1164  if (!s->rdft || !s->rdft_data){
1165  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1166  s->show_mode = SHOW_MODE_WAVES;
1167  } else {
1168  FFTSample *data[2];
1169  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1170  uint32_t *pixels;
1171  int pitch;
1172  for (ch = 0; ch < nb_display_channels; ch++) {
1173  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1174  i = i_start + ch;
1175  for (x = 0; x < 2 * nb_freq; x++) {
1176  double w = (x-nb_freq) * (1.0 / nb_freq);
1177  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1178  i += channels;
1179  if (i >= SAMPLE_ARRAY_SIZE)
1180  i -= SAMPLE_ARRAY_SIZE;
1181  }
1182  av_rdft_calc(s->rdft, data[ch]);
1183  }
1184  /* Least efficient way to do this; we should of course
1185  * access the texture directly, but it is more than fast enough. */
1186  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1187  pitch >>= 2;
1188  pixels += pitch * s->height;
1189  for (y = 0; y < s->height; y++) {
1190  double w = 1 / sqrt(nb_freq);
1191  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1192  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1193  : a;
1194  a = FFMIN(a, 255);
1195  b = FFMIN(b, 255);
1196  pixels -= pitch;
1197  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1198  }
1199  SDL_UnlockTexture(s->vis_texture);
1200  }
1201  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1202  }
1203  if (!s->paused)
1204  s->xpos++;
1205  }
1206 }
1207 
1208 static void stream_component_close(VideoState *is, int stream_index)
1209 {
1210  AVFormatContext *ic = is->ic;
1211  AVCodecParameters *codecpar;
1212 
1213  if (stream_index < 0 || stream_index >= ic->nb_streams)
1214  return;
1215  codecpar = ic->streams[stream_index]->codecpar;
1216 
1217  switch (codecpar->codec_type) {
1218  case AVMEDIA_TYPE_AUDIO:
1219  decoder_abort(&is->auddec, &is->sampq);
1220  SDL_CloseAudioDevice(audio_dev);
1221  decoder_destroy(&is->auddec);
1222  swr_free(&is->swr_ctx);
1223  av_freep(&is->audio_buf1);
1224  is->audio_buf1_size = 0;
1225  is->audio_buf = NULL;
1226 
1227  if (is->rdft) {
1228  av_rdft_end(is->rdft);
1229  av_freep(&is->rdft_data);
1230  is->rdft = NULL;
1231  is->rdft_bits = 0;
1232  }
1233  break;
1234  case AVMEDIA_TYPE_VIDEO:
1235  decoder_abort(&is->viddec, &is->pictq);
1236  decoder_destroy(&is->viddec);
1237  break;
1238  case AVMEDIA_TYPE_SUBTITLE:
1239  decoder_abort(&is->subdec, &is->subpq);
1240  decoder_destroy(&is->subdec);
1241  break;
1242  default:
1243  break;
1244  }
1245 
1246  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1247  switch (codecpar->codec_type) {
1248  case AVMEDIA_TYPE_AUDIO:
1249  is->audio_st = NULL;
1250  is->audio_stream = -1;
1251  break;
1252  case AVMEDIA_TYPE_VIDEO:
1253  is->video_st = NULL;
1254  is->video_stream = -1;
1255  break;
1256  case AVMEDIA_TYPE_SUBTITLE:
1257  is->subtitle_st = NULL;
1258  is->subtitle_stream = -1;
1259  break;
1260  default:
1261  break;
1262  }
1263 }
1264 
1266 {
1267  /* XXX: use a special url_shutdown call to abort parse cleanly */
1268  is->abort_request = 1;
1269  SDL_WaitThread(is->read_tid, NULL);
1270 
1271  /* close each stream */
1272  if (is->audio_stream >= 0)
1273  stream_component_close(is, is->audio_stream);
1274  if (is->video_stream >= 0)
1275  stream_component_close(is, is->video_stream);
1276  if (is->subtitle_stream >= 0)
1277  stream_component_close(is, is->subtitle_stream);
1278 
1279  avformat_close_input(&is->ic);
1280 
1281  packet_queue_destroy(&is->videoq);
1282  packet_queue_destroy(&is->audioq);
1283  packet_queue_destroy(&is->subtitleq);
1284 
1285  /* free all pictures */
1286  frame_queue_destory(&is->pictq);
1287  frame_queue_destory(&is->sampq);
1288  frame_queue_destory(&is->subpq);
1289  SDL_DestroyCond(is->continue_read_thread);
1290  sws_freeContext(is->img_convert_ctx);
1291  sws_freeContext(is->sub_convert_ctx);
1292  av_free(is->filename);
1293  if (is->vis_texture)
1294  SDL_DestroyTexture(is->vis_texture);
1295  if (is->vid_texture)
1296  SDL_DestroyTexture(is->vid_texture);
1297  if (is->sub_texture)
1298  SDL_DestroyTexture(is->sub_texture);
1299  av_free(is);
1300 }
1301 
1302 static void do_exit(VideoState *is)
1303 {
1304  if (is) {
1305  stream_close(is);
1306  }
1307  if (renderer)
1308  SDL_DestroyRenderer(renderer);
1309  if (window)
1310  SDL_DestroyWindow(window);
1311  uninit_opts();
1312 #if CONFIG_AVFILTER
1313  av_freep(&vfilters_list);
1314 #endif
1316  if (show_status)
1317  printf("\n");
1318  SDL_Quit();
1319  av_log(NULL, AV_LOG_QUIET, "%s", "");
1320  exit(0);
1321 }
1322 
1323 static void sigterm_handler(int sig)
1324 {
1325  exit(123);
1326 }
1327 
1329 {
1330  SDL_Rect rect;
1331  int max_width = screen_width ? screen_width : INT_MAX;
1332  int max_height = screen_height ? screen_height : INT_MAX;
1333  if (max_width == INT_MAX && max_height == INT_MAX)
1334  max_height = height;
1335  calculate_display_rect(&rect, 0, 0, max_width, max_height, width, height, sar);
1336  default_width = rect.w;
1337  default_height = rect.h;
1338 }
1339 
1341 {
1342  int w,h;
1343 
1346 
1347  if (!window_title)
1349  SDL_SetWindowTitle(window, window_title);
1350 
1351  SDL_SetWindowSize(window, w, h);
1352  SDL_SetWindowPosition(window, screen_left, screen_top);
1353  if (is_full_screen)
1354  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1355  SDL_ShowWindow(window);
1356 
1357  is->width = w;
1358  is->height = h;
1359 
1360  return 0;
1361 }
1362 
1363 /* display the current picture, if any */
1365 {
1366  if (!is->width)
1367  video_open(is);
1368 
1369  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1370  SDL_RenderClear(renderer);
1371  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1373  else if (is->video_st)
1375  SDL_RenderPresent(renderer);
1376 }
1377 
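/* The Clock extrapolates the last set pts using the system time:
 *   get_clock() == pts + (now - last_updated) * speed
 * which is what the pts_drift form below evaluates to. A clock whose
 * queue serial no longer matches its own serial is reported as NAN,
 * i.e. obsolete after a seek. */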
1378 static double get_clock(Clock *c)
1379 {
1380  if (*c->queue_serial != c->serial)
1381  return NAN;
1382  if (c->paused) {
1383  return c->pts;
1384  } else {
1385  double time = av_gettime_relative() / 1000000.0;
1386  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1387  }
1388 }
1389 
1390 static void set_clock_at(Clock *c, double pts, int serial, double time)
1391 {
1392  c->pts = pts;
1393  c->last_updated = time;
1394  c->pts_drift = c->pts - time;
1395  c->serial = serial;
1396 }
1397 
1398 static void set_clock(Clock *c, double pts, int serial)
1399 {
1400  double time = av_gettime_relative() / 1000000.0;
1401  set_clock_at(c, pts, serial, time);
1402 }
1403 
1404 static void set_clock_speed(Clock *c, double speed)
1405 {
1406  set_clock(c, get_clock(c), c->serial);
1407  c->speed = speed;
1408 }
1409 
1410 static void init_clock(Clock *c, int *queue_serial)
1411 {
1412  c->speed = 1.0;
1413  c->paused = 0;
1414  c->queue_serial = queue_serial;
1415  set_clock(c, NAN, -1);
1416 }
1417 
1418 static void sync_clock_to_slave(Clock *c, Clock *slave)
1419 {
1420  double clock = get_clock(c);
1421  double slave_clock = get_clock(slave);
1422  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1423  set_clock(c, slave_clock, slave->serial);
1424 }
1425 
1427  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1428  if (is->video_st)
1429  return AV_SYNC_VIDEO_MASTER;
1430  else
1431  return AV_SYNC_AUDIO_MASTER;
1432  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1433  if (is->audio_st)
1434  return AV_SYNC_AUDIO_MASTER;
1435  else
1436  return AV_SYNC_EXTERNAL_CLOCK;
1437  } else {
1438  return AV_SYNC_EXTERNAL_CLOCK;
1439  }
1440 }
1441 
1442 /* get the current master clock value */
1444 {
1445  double val;
1446 
1447  switch (get_master_sync_type(is)) {
1448  case AV_SYNC_VIDEO_MASTER:
1449  val = get_clock(&is->vidclk);
1450  break;
1451  case AV_SYNC_AUDIO_MASTER:
1452  val = get_clock(&is->audclk);
1453  break;
1454  default:
1455  val = get_clock(&is->extclk);
1456  break;
1457  }
1458  return val;
1459 }
1460 
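/* External clock speed control for realtime sources: the clock is slowed
 * down while a stream's packet queue holds EXTERNAL_CLOCK_MIN_FRAMES or
 * fewer packets, sped up while both queues hold more than
 * EXTERNAL_CLOCK_MAX_FRAMES, and otherwise stepped back toward nominal
 * speed (1.0). */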
1462  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1463  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1465  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1466  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1468  } else {
1469  double speed = is->extclk.speed;
1470  if (speed != 1.0)
1471  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1472  }
1473 }
1474 
1475 /* seek in the stream */
1476 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1477 {
1478  if (!is->seek_req) {
1479  is->seek_pos = pos;
1480  is->seek_rel = rel;
1481  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1482  if (seek_by_bytes)
1483  is->seek_flags |= AVSEEK_FLAG_BYTE;
1484  is->seek_req = 1;
1485  SDL_CondSignal(is->continue_read_thread);
1486  }
1487 }
1488 
1489 /* pause or resume the video */
1491 {
1492  if (is->paused) {
1493  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1494  if (is->read_pause_return != AVERROR(ENOSYS)) {
1495  is->vidclk.paused = 0;
1496  }
1497  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1498  }
1499  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1500  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1501 }
1502 
1504 {
1506  is->step = 0;
1507 }
1508 
1510 {
1511  is->muted = !is->muted;
1512 }
1513 
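/* Volume is stepped in decibels: the current SDL volume is converted to dB
 * (20*log10(volume/SDL_MIX_MAXVOLUME)), sign*step dB is added, and the
 * result is converted back and clipped to [0, SDL_MIX_MAXVOLUME]. The
 * "+ sign" nudge guarantees progress when the dB step rounds back to the
 * same integer volume. */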
1514 static void update_volume(VideoState *is, int sign, double step)
1515 {
1516  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1517  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1518  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1519 }
1520 
1522 {
1523  /* if the stream is paused, unpause it, then step */
1524  if (is->paused)
1526  is->step = 1;
1527 }
1528 
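/* Given the nominal duration of the previously displayed frame, return the
 * delay to wait before showing the next one. When video is not the master
 * clock, the delay is shortened if video lags behind the master by more than
 * the sync threshold, and lengthened (or doubled) if it runs ahead. */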
1529 static double compute_target_delay(double delay, VideoState *is)
1530 {
1531  double sync_threshold, diff = 0;
1532 
1533  /* update delay to follow master synchronisation source */
1535  /* if video is slave, we try to correct big delays by
1536  duplicating or deleting a frame */
1537  diff = get_clock(&is->vidclk) - get_master_clock(is);
1538 
1539  /* skip or repeat frame. We take into account the
1540  delay to compute the threshold. I still don't know
1541  if it is the best guess */
1542  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1543  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1544  if (diff <= -sync_threshold)
1545  delay = FFMAX(0, delay + diff);
1546  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1547  delay = delay + diff;
1548  else if (diff >= sync_threshold)
1549  delay = 2 * delay;
1550  }
1551  }
1552 
1553  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1554  delay, -diff);
1555 
1556  return delay;
1557 }
1558 
1559 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1560  if (vp->serial == nextvp->serial) {
1561  double duration = nextvp->pts - vp->pts;
1562  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1563  return vp->duration;
1564  else
1565  return duration;
1566  } else {
1567  return 0.0;
1568  }
1569 }
1570 
1571 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1572  /* update current video pts */
1573  set_clock(&is->vidclk, pts, serial);
1574  sync_clock_to_slave(&is->extclk, &is->vidclk);
1575 }
1576 
1577 /* called to display each frame */
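/* Display is paced off frame_timer: each shown frame advances frame_timer by
 * the computed target delay, *remaining_time tells the event loop how long it
 * may sleep before the next refresh, and late frames are dropped
 * (frame_drops_late) when framedrop is enabled and video is not the master
 * clock. */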
1578 static void video_refresh(void *opaque, double *remaining_time)
1579 {
1580  VideoState *is = opaque;
1581  double time;
1582 
1583  Frame *sp, *sp2;
1584 
1585  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1587 
1588  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1589  time = av_gettime_relative() / 1000000.0;
1590  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1591  video_display(is);
1592  is->last_vis_time = time;
1593  }
1594  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1595  }
1596 
1597  if (is->video_st) {
1598 retry:
1599  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1600  // nothing to do, no picture to display in the queue
1601  } else {
1602  double last_duration, duration, delay;
1603  Frame *vp, *lastvp;
1604 
1605  /* dequeue the picture */
1606  lastvp = frame_queue_peek_last(&is->pictq);
1607  vp = frame_queue_peek(&is->pictq);
1608 
1609  if (vp->serial != is->videoq.serial) {
1610  frame_queue_next(&is->pictq);
1611  goto retry;
1612  }
1613 
1614  if (lastvp->serial != vp->serial)
1615  is->frame_timer = av_gettime_relative() / 1000000.0;
1616 
1617  if (is->paused)
1618  goto display;
1619 
1620  /* compute nominal last_duration */
1621  last_duration = vp_duration(is, lastvp, vp);
1622  delay = compute_target_delay(last_duration, is);
1623 
1624  time= av_gettime_relative()/1000000.0;
1625  if (time < is->frame_timer + delay) {
1626  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1627  goto display;
1628  }
1629 
1630  is->frame_timer += delay;
1631  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1632  is->frame_timer = time;
1633 
1634  SDL_LockMutex(is->pictq.mutex);
1635  if (!isnan(vp->pts))
1636  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1637  SDL_UnlockMutex(is->pictq.mutex);
1638 
1639  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1640  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1641  duration = vp_duration(is, vp, nextvp);
1642  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1643  is->frame_drops_late++;
1644  frame_queue_next(&is->pictq);
1645  goto retry;
1646  }
1647  }
1648 
1649  if (is->subtitle_st) {
1650  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1651  sp = frame_queue_peek(&is->subpq);
1652 
1653  if (frame_queue_nb_remaining(&is->subpq) > 1)
1654  sp2 = frame_queue_peek_next(&is->subpq);
1655  else
1656  sp2 = NULL;
1657 
1658  if (sp->serial != is->subtitleq.serial
1659  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1660  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1661  {
1662  if (sp->uploaded) {
1663  int i;
1664  for (i = 0; i < sp->sub.num_rects; i++) {
1665  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1666  uint8_t *pixels;
1667  int pitch, j;
1668 
1669  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1670  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1671  memset(pixels, 0, sub_rect->w << 2);
1672  SDL_UnlockTexture(is->sub_texture);
1673  }
1674  }
1675  }
1676  frame_queue_next(&is->subpq);
1677  } else {
1678  break;
1679  }
1680  }
1681  }
1682 
1683  frame_queue_next(&is->pictq);
1684  is->force_refresh = 1;
1685 
1686  if (is->step && !is->paused)
1688  }
1689 display:
1690  /* display picture */
1691  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1692  video_display(is);
1693  }
1694  is->force_refresh = 0;
1695  if (show_status) {
1696  AVBPrint buf;
1697  static int64_t last_time;
1698  int64_t cur_time;
1699  int aqsize, vqsize, sqsize;
1700  double av_diff;
1701 
1702  cur_time = av_gettime_relative();
1703  if (!last_time || (cur_time - last_time) >= 30000) {
1704  aqsize = 0;
1705  vqsize = 0;
1706  sqsize = 0;
1707  if (is->audio_st)
1708  aqsize = is->audioq.size;
1709  if (is->video_st)
1710  vqsize = is->videoq.size;
1711  if (is->subtitle_st)
1712  sqsize = is->subtitleq.size;
1713  av_diff = 0;
1714  if (is->audio_st && is->video_st)
1715  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1716  else if (is->video_st)
1717  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1718  else if (is->audio_st)
1719  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1720 
1722  av_bprintf(&buf,
1723  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1725  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1726  av_diff,
1727  is->frame_drops_early + is->frame_drops_late,
1728  aqsize / 1024,
1729  vqsize / 1024,
1730  sqsize,
1731  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1732  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1733 
1734  if (show_status == 1 && AV_LOG_INFO > av_log_get_level())
1735  fprintf(stderr, "%s", buf.str);
1736  else
1737  av_log(NULL, AV_LOG_INFO, "%s", buf.str);
1738 
1739  fflush(stderr);
1740  av_bprint_finalize(&buf, NULL);
1741 
1742  last_time = cur_time;
1743  }
1744  }
1745 }
1746 
1747 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1748 {
1749  Frame *vp;
1750 
1751 #if defined(DEBUG_SYNC)
1752  printf("frame_type=%c pts=%0.3f\n",
1753  av_get_picture_type_char(src_frame->pict_type), pts);
1754 #endif
1755 
1756  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1757  return -1;
1758 
1759  vp->sar = src_frame->sample_aspect_ratio;
1760  vp->uploaded = 0;
1761 
1762  vp->width = src_frame->width;
1763  vp->height = src_frame->height;
1764  vp->format = src_frame->format;
1765 
1766  vp->pts = pts;
1767  vp->duration = duration;
1768  vp->pos = pos;
1769  vp->serial = serial;
1770 
1771  set_default_window_size(vp->width, vp->height, vp->sar);
1772 
1773  av_frame_move_ref(vp->frame, src_frame);
1774  frame_queue_push(&is->pictq);
1775  return 0;
1776 }
1777 
1779 {
1780  int got_picture;
1781 
1782  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1783  return -1;
1784 
1785  if (got_picture) {
1786  double dpts = NAN;
1787 
1788  if (frame->pts != AV_NOPTS_VALUE)
1789  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1790 
1791  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1792 
1794  if (frame->pts != AV_NOPTS_VALUE) {
1795  double diff = dpts - get_master_clock(is);
1796  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1797  diff - is->frame_last_filter_delay < 0 &&
1798  is->viddec.pkt_serial == is->vidclk.serial &&
1799  is->videoq.nb_packets) {
1800  is->frame_drops_early++;
1802  got_picture = 0;
1803  }
1804  }
1805  }
1806  }
1807 
1808  return got_picture;
1809 }
1810 
1811 #if CONFIG_AVFILTER
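/* Wire source -> (optional user filtergraph) -> sink: if a filtergraph string
 * is given it is parsed between the "in" and "out" labels, otherwise the
 * source is linked straight to the sink; the graph is then configured. */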
1812 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1813  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1814 {
1815  int ret, i;
1816  int nb_filters = graph->nb_filters;
1818 
1819  if (filtergraph) {
1822  if (!outputs || !inputs) {
1823  ret = AVERROR(ENOMEM);
1824  goto fail;
1825  }
1826 
1827  outputs->name = av_strdup("in");
1828  outputs->filter_ctx = source_ctx;
1829  outputs->pad_idx = 0;
1830  outputs->next = NULL;
1831 
1832  inputs->name = av_strdup("out");
1833  inputs->filter_ctx = sink_ctx;
1834  inputs->pad_idx = 0;
1835  inputs->next = NULL;
1836 
1837  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1838  goto fail;
1839  } else {
1840  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1841  goto fail;
1842  }
1843 
1844  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1845  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1846  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1847 
1848  ret = avfilter_graph_config(graph, NULL);
1849 fail:
1852  return ret;
1853 }
1854 
1855 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1856 {
1858  char sws_flags_str[512] = "";
1859  char buffersrc_args[256];
1860  int ret;
1861  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1862  AVCodecParameters *codecpar = is->video_st->codecpar;
1863  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1864  AVDictionaryEntry *e = NULL;
1865  int nb_pix_fmts = 0;
1866  int i, j;
1867 
1868  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1869  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1870  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1871  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1872  break;
1873  }
1874  }
1875  }
1876  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1877 
1878  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1879  if (!strcmp(e->key, "sws_flags")) {
1880  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1881  } else
1882  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1883  }
1884  if (strlen(sws_flags_str))
1885  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1886 
1887  graph->scale_sws_opts = av_strdup(sws_flags_str);
1888 
1889  snprintf(buffersrc_args, sizeof(buffersrc_args),
1890  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1891  frame->width, frame->height, frame->format,
1892  is->video_st->time_base.num, is->video_st->time_base.den,
1893  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1894  if (fr.num && fr.den)
1895  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1896 
1897  if ((ret = avfilter_graph_create_filter(&filt_src,
1898  avfilter_get_by_name("buffer"),
1899  "ffplay_buffer", buffersrc_args, NULL,
1900  graph)) < 0)
1901  goto fail;
1902 
1903  ret = avfilter_graph_create_filter(&filt_out,
1904  avfilter_get_by_name("buffersink"),
1905  "ffplay_buffersink", NULL, NULL, graph);
1906  if (ret < 0)
1907  goto fail;
1908 
1909  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1910  goto fail;
1911 
1912  last_filter = filt_out;
1913 
1914 /* Note: this macro adds a filter before the most recently added filter, so the
1915  * processing order of the filters is reversed */
1916 #define INSERT_FILT(name, arg) do { \
1917  AVFilterContext *filt_ctx; \
1918  \
1919  ret = avfilter_graph_create_filter(&filt_ctx, \
1920  avfilter_get_by_name(name), \
1921  "ffplay_" name, arg, NULL, graph); \
1922  if (ret < 0) \
1923  goto fail; \
1924  \
1925  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1926  if (ret < 0) \
1927  goto fail; \
1928  \
1929  last_filter = filt_ctx; \
1930 } while (0)
1931 
1932  if (autorotate) {
1933  double theta = get_rotation(is->video_st);
1934 
1935  if (fabs(theta - 90) < 1.0) {
1936  INSERT_FILT("transpose", "clock");
1937  } else if (fabs(theta - 180) < 1.0) {
1938  INSERT_FILT("hflip", NULL);
1939  INSERT_FILT("vflip", NULL);
1940  } else if (fabs(theta - 270) < 1.0) {
1941  INSERT_FILT("transpose", "cclock");
1942  } else if (fabs(theta) > 1.0) {
1943  char rotate_buf[64];
1944  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1945  INSERT_FILT("rotate", rotate_buf);
1946  }
1947  }
1948 
1949  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1950  goto fail;
1951 
1952  is->in_video_filter = filt_src;
1953  is->out_video_filter = filt_out;
1954 
1955 fail:
1956  return ret;
1957 }
1958 
1959 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1960 {
1962  int sample_rates[2] = { 0, -1 };
1963  int64_t channel_layouts[2] = { 0, -1 };
1964  int channels[2] = { 0, -1 };
1965  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1966  char aresample_swr_opts[512] = "";
1967  AVDictionaryEntry *e = NULL;
1968  char asrc_args[256];
1969  int ret;
1970 
1971  avfilter_graph_free(&is->agraph);
1972  if (!(is->agraph = avfilter_graph_alloc()))
1973  return AVERROR(ENOMEM);
1974  is->agraph->nb_threads = filter_nbthreads;
1975 
1976  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1977  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1978  if (strlen(aresample_swr_opts))
1979  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1980  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1981 
1982  ret = snprintf(asrc_args, sizeof(asrc_args),
1983  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1984  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1985  is->audio_filter_src.channels,
1986  1, is->audio_filter_src.freq);
1987  if (is->audio_filter_src.channel_layout)
1988  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1989  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1990 
1991  ret = avfilter_graph_create_filter(&filt_asrc,
1992  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1993  asrc_args, NULL, is->agraph);
1994  if (ret < 0)
1995  goto end;
1996 
1997 
1998  ret = avfilter_graph_create_filter(&filt_asink,
1999  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2000  NULL, NULL, is->agraph);
2001  if (ret < 0)
2002  goto end;
2003 
2004  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2005  goto end;
2006  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2007  goto end;
2008 
2009  if (force_output_format) {
2010  channel_layouts[0] = is->audio_tgt.channel_layout;
2011  channels [0] = is->audio_tgt.channels;
2012  sample_rates [0] = is->audio_tgt.freq;
2013  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2014  goto end;
2015  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2016  goto end;
2017  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2018  goto end;
2019  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2020  goto end;
2021  }
2022 
2023 
2024  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2025  goto end;
2026 
2027  is->in_audio_filter = filt_asrc;
2028  is->out_audio_filter = filt_asink;
2029 
2030 end:
2031  if (ret < 0)
2032  avfilter_graph_free(&is->agraph);
2033  return ret;
2034 }
2035 #endif /* CONFIG_AVFILTER */
2036 
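/* Example of the abuffer arguments built above, assuming a 48 kHz stereo
 * planar-float source (illustrative values, not taken from a real stream):
 *
 *     sample_rate=48000:sample_fmt=fltp:channels=2:time_base=1/48000:channel_layout=0x3
 *
 * When force_output_format is set (after the SDL audio device has been opened),
 * the sink is additionally pinned to audio_tgt, so the filter graph itself does
 * any resampling/remixing and frames come out ready for the device. */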
2037 static int audio_thread(void *arg)
2038 {
2039  VideoState *is = arg;
2040  AVFrame *frame = av_frame_alloc();
2041  Frame *af;
2042 #if CONFIG_AVFILTER
2043  int last_serial = -1;
2044  int64_t dec_channel_layout;
2045  int reconfigure;
2046 #endif
2047  int got_frame = 0;
2048  AVRational tb;
2049  int ret = 0;
2050 
2051  if (!frame)
2052  return AVERROR(ENOMEM);
2053 
2054  do {
2055  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2056  goto the_end;
2057 
2058  if (got_frame) {
2059  tb = (AVRational){1, frame->sample_rate};
2060 
2061 #if CONFIG_AVFILTER
2062  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2063 
2064  reconfigure =
2065  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2066  frame->format, frame->channels) ||
2067  is->audio_filter_src.channel_layout != dec_channel_layout ||
2068  is->audio_filter_src.freq != frame->sample_rate ||
2069  is->auddec.pkt_serial != last_serial;
2070 
2071  if (reconfigure) {
2072  char buf1[1024], buf2[1024];
2073  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2074  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2075  av_log(NULL, AV_LOG_DEBUG,
2076  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2077  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2078  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2079 
2080  is->audio_filter_src.fmt = frame->format;
2081  is->audio_filter_src.channels = frame->channels;
2082  is->audio_filter_src.channel_layout = dec_channel_layout;
2083  is->audio_filter_src.freq = frame->sample_rate;
2084  last_serial = is->auddec.pkt_serial;
2085 
2086  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2087  goto the_end;
2088  }
2089 
2090  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2091  goto the_end;
2092 
2093  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2094  tb = av_buffersink_get_time_base(is->out_audio_filter);
2095 #endif
2096  if (!(af = frame_queue_peek_writable(&is->sampq)))
2097  goto the_end;
2098 
2099  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2100  af->pos = frame->pkt_pos;
2101  af->serial = is->auddec.pkt_serial;
2102  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2103 
2104  av_frame_move_ref(af->frame, frame);
2105  frame_queue_push(&is->sampq);
2106 
2107 #if CONFIG_AVFILTER
2108  if (is->audioq.serial != is->auddec.pkt_serial)
2109  break;
2110  }
2111  if (ret == AVERROR_EOF)
2112  is->auddec.finished = is->auddec.pkt_serial;
2113 #endif
2114  }
2115  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2116  the_end:
2117 #if CONFIG_AVFILTER
2118  avfilter_graph_free(&is->agraph);
2119 #endif
2120  av_frame_free(&frame);
2121  return ret;
2122 }
2123 
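/* The audio filter graph above is rebuilt whenever the decoder starts producing
 * frames that no longer match audio_filter_src (sample format, channel count,
 * channel layout or sample rate), or when the packet serial changes after a
 * seek. A hypothetical trigger would be a chained stream switching from
 * 44.1 kHz stereo to 48 kHz mono mid-playback. */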
2124 static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
2125 {
2126  packet_queue_start(d->queue);
2127  d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);
2128  if (!d->decoder_tid) {
2129  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2130  return AVERROR(ENOMEM);
2131  }
2132  return 0;
2133 }
2134 
2135 static int video_thread(void *arg)
2136 {
2137  VideoState *is = arg;
2138  AVFrame *frame = av_frame_alloc();
2139  double pts;
2140  double duration;
2141  int ret;
2142  AVRational tb = is->video_st->time_base;
2143  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2144 
2145 #if CONFIG_AVFILTER
2146  AVFilterGraph *graph = NULL;
2147  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2148  int last_w = 0;
2149  int last_h = 0;
2150  enum AVPixelFormat last_format = -2;
2151  int last_serial = -1;
2152  int last_vfilter_idx = 0;
2153 #endif
2154 
2155  if (!frame)
2156  return AVERROR(ENOMEM);
2157 
2158  for (;;) {
2159  ret = get_video_frame(is, frame);
2160  if (ret < 0)
2161  goto the_end;
2162  if (!ret)
2163  continue;
2164 
2165 #if CONFIG_AVFILTER
2166  if ( last_w != frame->width
2167  || last_h != frame->height
2168  || last_format != frame->format
2169  || last_serial != is->viddec.pkt_serial
2170  || last_vfilter_idx != is->vfilter_idx) {
2171  av_log(NULL, AV_LOG_DEBUG,
2172  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2173  last_w, last_h,
2174  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2175  frame->width, frame->height,
2176  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2177  avfilter_graph_free(&graph);
2178  graph = avfilter_graph_alloc();
2179  if (!graph) {
2180  ret = AVERROR(ENOMEM);
2181  goto the_end;
2182  }
2183  graph->nb_threads = filter_nbthreads;
2184  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2185  SDL_Event event;
2186  event.type = FF_QUIT_EVENT;
2187  event.user.data1 = is;
2188  SDL_PushEvent(&event);
2189  goto the_end;
2190  }
2191  filt_in = is->in_video_filter;
2192  filt_out = is->out_video_filter;
2193  last_w = frame->width;
2194  last_h = frame->height;
2195  last_format = frame->format;
2196  last_serial = is->viddec.pkt_serial;
2197  last_vfilter_idx = is->vfilter_idx;
2198  frame_rate = av_buffersink_get_frame_rate(filt_out);
2199  }
2200 
2201  ret = av_buffersrc_add_frame(filt_in, frame);
2202  if (ret < 0)
2203  goto the_end;
2204 
2205  while (ret >= 0) {
2206  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2207 
2208  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2209  if (ret < 0) {
2210  if (ret == AVERROR_EOF)
2211  is->viddec.finished = is->viddec.pkt_serial;
2212  ret = 0;
2213  break;
2214  }
2215 
2216  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2217  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2218  is->frame_last_filter_delay = 0;
2219  tb = av_buffersink_get_time_base(filt_out);
2220 #endif
2221  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2222  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2223  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2224  av_frame_unref(frame);
2225 #if CONFIG_AVFILTER
2226  if (is->videoq.serial != is->viddec.pkt_serial)
2227  break;
2228  }
2229 #endif
2230 
2231  if (ret < 0)
2232  goto the_end;
2233  }
2234  the_end:
2235 #if CONFIG_AVFILTER
2236  avfilter_graph_free(&graph);
2237 #endif
2238  av_frame_free(&frame);
2239  return 0;
2240 }
2241 
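/* Sketch of the timing math above: duration is the nominal frame period taken
 * from the (possibly filter-adjusted) frame rate, e.g. 25/1 fps -> 1/25 = 0.040 s,
 * while pts converts the frame timestamp from time_base units to seconds
 * (frame->pts * av_q2d(tb)); a frame with AV_NOPTS_VALUE gets pts = NAN here.
 * Both values are handed to queue_picture() and later drive the display clock. */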
2242 static int subtitle_thread(void *arg)
2243 {
2244  VideoState *is = arg;
2245  Frame *sp;
2246  int got_subtitle;
2247  double pts;
2248 
2249  for (;;) {
2250  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2251  return 0;
2252 
2253  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2254  break;
2255 
2256  pts = 0;
2257 
2258  if (got_subtitle && sp->sub.format == 0) {
2259  if (sp->sub.pts != AV_NOPTS_VALUE)
2260  pts = sp->sub.pts / (double)AV_TIME_BASE;
2261  sp->pts = pts;
2262  sp->serial = is->subdec.pkt_serial;
2263  sp->width = is->subdec.avctx->width;
2264  sp->height = is->subdec.avctx->height;
2265  sp->uploaded = 0;
2266 
2267  /* now we can update the picture count */
2268  frame_queue_push(&is->subpq);
2269  } else if (got_subtitle) {
2270  avsubtitle_free(&sp->sub);
2271  }
2272  }
2273  return 0;
2274 }
2275 
2276 /* copy samples for viewing in editor window */
2277 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2278 {
2279  int size, len;
2280 
2281  size = samples_size / sizeof(short);
2282  while (size > 0) {
2283  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2284  if (len > size)
2285  len = size;
2286  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2287  samples += len;
2288  is->sample_array_index += len;
2289  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2290  is->sample_array_index = 0;
2291  size -= len;
2292  }
2293 }
2294 
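/* sample_array acts as a simple ring buffer: the write index wraps back to 0
 * once SAMPLE_ARRAY_SIZE samples have been stored, so the waveform/RDFT display
 * always sees the most recent samples regardless of the callback size. */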
2295 /* return the wanted number of samples to get better sync if sync_type is video
2296  * or external master clock */
2297 static int synchronize_audio(VideoState *is, int nb_samples)
2298 {
2299  int wanted_nb_samples = nb_samples;
2300 
2301  /* if not master, then we try to remove or add samples to correct the clock */
2302  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2303  double diff, avg_diff;
2304  int min_nb_samples, max_nb_samples;
2305 
2306  diff = get_clock(&is->audclk) - get_master_clock(is);
2307 
2308  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2309  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2310  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2311  /* not enough measures to have a correct estimate */
2312  is->audio_diff_avg_count++;
2313  } else {
2314  /* estimate the A-V difference */
2315  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2316 
2317  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2318  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2319  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2320  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2321  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2322  }
2323  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2324  diff, avg_diff, wanted_nb_samples - nb_samples,
2325  is->audio_clock, is->audio_diff_threshold);
2326  }
2327  } else {
2328  /* difference is too large: probably initial PTS errors, so
2329  reset the A-V filter */
2330  is->audio_diff_avg_count = 0;
2331  is->audio_diff_cum = 0;
2332  }
2333  }
2334 
2335  return wanted_nb_samples;
2336 }
2337 
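/* Worked example (illustrative numbers): with AUDIO_DIFF_AVG_NB = 20 the
 * averaging coefficient is exp(log(0.01)/20) ~= 0.794, so audio_diff_cum is an
 * exponentially weighted sum and avg_diff = cum * (1 - 0.794) recovers the mean
 * drift. If the decoder delivers nb_samples = 1024 and the averaged drift
 * exceeds the threshold, the correction is clamped to +/-10%, so
 * wanted_nb_samples stays within [921, 1126] samples for that call. */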
2338 /**
2339  * Decode one audio frame and return its uncompressed size.
2340  *
2341  * The processed audio frame is decoded, converted if required, and
2342  * stored in is->audio_buf, with size in bytes given by the return
2343  * value.
2344  */
2345 static int audio_decode_frame(VideoState *is)
2346 {
2347  int data_size, resampled_data_size;
2348  int64_t dec_channel_layout;
2349  av_unused double audio_clock0;
2350  int wanted_nb_samples;
2351  Frame *af;
2352 
2353  if (is->paused)
2354  return -1;
2355 
2356  do {
2357 #if defined(_WIN32)
2358  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2359  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2360  return -1;
2361  av_usleep (1000);
2362  }
2363 #endif
2364  if (!(af = frame_queue_peek_readable(&is->sampq)))
2365  return -1;
2366  frame_queue_next(&is->sampq);
2367  } while (af->serial != is->audioq.serial);
2368 
2369  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2370  af->frame->nb_samples,
2371  af->frame->format, 1);
2372 
2373  dec_channel_layout =
2374  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2375  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2376  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2377 
2378  if (af->frame->format != is->audio_src.fmt ||
2379  dec_channel_layout != is->audio_src.channel_layout ||
2380  af->frame->sample_rate != is->audio_src.freq ||
2381  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2382  swr_free(&is->swr_ctx);
2383  is->swr_ctx = swr_alloc_set_opts(NULL,
2384  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2385  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2386  0, NULL);
2387  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2388  av_log(NULL, AV_LOG_ERROR,
2389  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2390  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
2391  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2392  swr_free(&is->swr_ctx);
2393  return -1;
2394  }
2395  is->audio_src.channel_layout = dec_channel_layout;
2396  is->audio_src.channels = af->frame->channels;
2397  is->audio_src.freq = af->frame->sample_rate;
2398  is->audio_src.fmt = af->frame->format;
2399  }
2400 
2401  if (is->swr_ctx) {
2402  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2403  uint8_t **out = &is->audio_buf1;
2404  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2405  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2406  int len2;
2407  if (out_size < 0) {
2408  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2409  return -1;
2410  }
2411  if (wanted_nb_samples != af->frame->nb_samples) {
2412  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2413  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2414  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2415  return -1;
2416  }
2417  }
2418  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2419  if (!is->audio_buf1)
2420  return AVERROR(ENOMEM);
2421  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2422  if (len2 < 0) {
2423  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2424  return -1;
2425  }
2426  if (len2 == out_count) {
2427  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2428  if (swr_init(is->swr_ctx) < 0)
2429  swr_free(&is->swr_ctx);
2430  }
2431  is->audio_buf = is->audio_buf1;
2432  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2433  } else {
2434  is->audio_buf = af->frame->data[0];
2435  resampled_data_size = data_size;
2436  }
2437 
2438  audio_clock0 = is->audio_clock;
2439  /* update the audio clock with the pts */
2440  if (!isnan(af->pts))
2441  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2442  else
2443  is->audio_clock = NAN;
2444  is->audio_clock_serial = af->serial;
2445 #ifdef DEBUG
2446  {
2447  static double last_clock;
2448  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2449  is->audio_clock - last_clock,
2450  is->audio_clock, audio_clock0);
2451  last_clock = is->audio_clock;
2452  }
2453 #endif
2454  return resampled_data_size;
2455 }
2456 
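/* Note on the resampling path above: out_count adds 256 samples of headroom on
 * top of the rate-converted wanted_nb_samples, so swr_convert() normally has
 * room for everything it can flush; if it still fills the whole buffer, the
 * "audio buffer is probably too small" warning fires and the resampler is
 * reinitialized. When no conversion is needed, audio_buf simply aliases the
 * decoded frame's data[0] and no copy is made. */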
2457 /* prepare a new audio buffer */
2458 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2459 {
2460  VideoState *is = opaque;
2461  int audio_size, len1;
2462 
2463  audio_callback_time = av_gettime_relative();
2464 
2465  while (len > 0) {
2466  if (is->audio_buf_index >= is->audio_buf_size) {
2467  audio_size = audio_decode_frame(is);
2468  if (audio_size < 0) {
2469  /* if error, just output silence */
2470  is->audio_buf = NULL;
2471  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2472  } else {
2473  if (is->show_mode != SHOW_MODE_VIDEO)
2474  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2475  is->audio_buf_size = audio_size;
2476  }
2477  is->audio_buf_index = 0;
2478  }
2479  len1 = is->audio_buf_size - is->audio_buf_index;
2480  if (len1 > len)
2481  len1 = len;
2482  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2483  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2484  else {
2485  memset(stream, 0, len1);
2486  if (!is->muted && is->audio_buf)
2487  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2488  }
2489  len -= len1;
2490  stream += len1;
2491  is->audio_buf_index += len1;
2492  }
2493  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2494  /* Let's assume the audio driver that is used by SDL has two periods. */
2495  if (!isnan(is->audio_clock)) {
2496  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2497  sync_clock_to_slave(&is->extclk, &is->audclk);
2498  }
2499 }
2500 
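/* Example of the clock adjustment above (illustrative figures): for 48 kHz
 * stereo S16 output, bytes_per_sec = 48000 * 2 * 2 = 192000. With a hardware
 * buffer of, say, 8192 bytes and 4096 bytes still unwritten in audio_buf, the
 * audio clock is set to audio_clock - (2*8192 + 4096)/192000 ~= 0.107 s in the
 * past, matching the assumption that SDL's driver double-buffers the output. */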
2501 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2502 {
2503  SDL_AudioSpec wanted_spec, spec;
2504  const char *env;
2505  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2506  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2507  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2508 
2509  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2510  if (env) {
2511  wanted_nb_channels = atoi(env);
2512  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2513  }
2514  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2515  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2516  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2517  }
2518  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2519  wanted_spec.channels = wanted_nb_channels;
2520  wanted_spec.freq = wanted_sample_rate;
2521  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2522  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2523  return -1;
2524  }
2525  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2526  next_sample_rate_idx--;
2527  wanted_spec.format = AUDIO_S16SYS;
2528  wanted_spec.silence = 0;
2529  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2530  wanted_spec.callback = sdl_audio_callback;
2531  wanted_spec.userdata = opaque;
2532  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2533  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2534  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2535  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2536  if (!wanted_spec.channels) {
2537  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2538  wanted_spec.channels = wanted_nb_channels;
2539  if (!wanted_spec.freq) {
2540  av_log(NULL, AV_LOG_ERROR,
2541  "No more combinations to try, audio open failed\n");
2542  return -1;
2543  }
2544  }
2545  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2546  }
2547  if (spec.format != AUDIO_S16SYS) {
2548  av_log(NULL, AV_LOG_ERROR,
2549  "SDL advised audio format %d is not supported!\n", spec.format);
2550  return -1;
2551  }
2552  if (spec.channels != wanted_spec.channels) {
2553  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2554  if (!wanted_channel_layout) {
2555  av_log(NULL, AV_LOG_ERROR,
2556  "SDL advised channel count %d is not supported!\n", spec.channels);
2557  return -1;
2558  }
2559  }
2560 
2561  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2562  audio_hw_params->freq = spec.freq;
2563  audio_hw_params->channel_layout = wanted_channel_layout;
2564  audio_hw_params->channels = spec.channels;
2565  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2566  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2567  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2568  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2569  return -1;
2570  }
2571  return spec.size;
2572 }
2573 
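/* Example of the buffer sizing above, assuming a 48 kHz request:
 * 48000 / SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 1600, av_log2(1600) = 10, so
 * samples = FFMAX(512, 2 << 10) = 2048 per callback, i.e. roughly 23 callbacks
 * per second. If SDL rejects the requested spec, the loop walks
 * next_nb_channels[] and then next_sample_rates[] until a combination is
 * accepted or the list is exhausted. */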
2574 /* open a given stream. Return 0 if OK */
2575 static int stream_component_open(VideoState *is, int stream_index)
2576 {
2577  AVFormatContext *ic = is->ic;
2578  AVCodecContext *avctx;
2579  AVCodec *codec;
2580  const char *forced_codec_name = NULL;
2581  AVDictionary *opts = NULL;
2582  AVDictionaryEntry *t = NULL;
2583  int sample_rate, nb_channels;
2584  int64_t channel_layout;
2585  int ret = 0;
2586  int stream_lowres = lowres;
2587 
2588  if (stream_index < 0 || stream_index >= ic->nb_streams)
2589  return -1;
2590 
2591  avctx = avcodec_alloc_context3(NULL);
2592  if (!avctx)
2593  return AVERROR(ENOMEM);
2594 
2595  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2596  if (ret < 0)
2597  goto fail;
2598  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2599 
2600  codec = avcodec_find_decoder(avctx->codec_id);
2601 
2602  switch(avctx->codec_type){
2603  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2604  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2605  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2606  }
2607  if (forced_codec_name)
2608  codec = avcodec_find_decoder_by_name(forced_codec_name);
2609  if (!codec) {
2610  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2611  "No codec could be found with name '%s'\n", forced_codec_name);
2612  else av_log(NULL, AV_LOG_WARNING,
2613  "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
2614  ret = AVERROR(EINVAL);
2615  goto fail;
2616  }
2617 
2618  avctx->codec_id = codec->id;
2619  if (stream_lowres > codec->max_lowres) {
2620  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2621  codec->max_lowres);
2622  stream_lowres = codec->max_lowres;
2623  }
2624  avctx->lowres = stream_lowres;
2625 
2626  if (fast)
2627  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2628 
2629  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2630  if (!av_dict_get(opts, "threads", NULL, 0))
2631  av_dict_set(&opts, "threads", "auto", 0);
2632  if (stream_lowres)
2633  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2634  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2635  av_dict_set(&opts, "refcounted_frames", "1", 0);
2636  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2637  goto fail;
2638  }
2639  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2640  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2641  ret = AVERROR_OPTION_NOT_FOUND;
2642  goto fail;
2643  }
2644 
2645  is->eof = 0;
2646  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2647  switch (avctx->codec_type) {
2648  case AVMEDIA_TYPE_AUDIO:
2649 #if CONFIG_AVFILTER
2650  {
2651  AVFilterContext *sink;
2652 
2653  is->audio_filter_src.freq = avctx->sample_rate;
2654  is->audio_filter_src.channels = avctx->channels;
2655  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2656  is->audio_filter_src.fmt = avctx->sample_fmt;
2657  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2658  goto fail;
2659  sink = is->out_audio_filter;
2660  sample_rate = av_buffersink_get_sample_rate(sink);
2661  nb_channels = av_buffersink_get_channels(sink);
2662  channel_layout = av_buffersink_get_channel_layout(sink);
2663  }
2664 #else
2665  sample_rate = avctx->sample_rate;
2666  nb_channels = avctx->channels;
2667  channel_layout = avctx->channel_layout;
2668 #endif
2669 
2670  /* prepare audio output */
2671  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2672  goto fail;
2673  is->audio_hw_buf_size = ret;
2674  is->audio_src = is->audio_tgt;
2675  is->audio_buf_size = 0;
2676  is->audio_buf_index = 0;
2677 
2678  /* init averaging filter */
2679  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2680  is->audio_diff_avg_count = 0;
2681  /* since we do not have a precise enough audio FIFO fullness measure,
2682  we correct audio sync only if the error is larger than this threshold */
2683  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2684 
2685  is->audio_stream = stream_index;
2686  is->audio_st = ic->streams[stream_index];
2687 
2688  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2689  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2690  is->auddec.start_pts = is->audio_st->start_time;
2691  is->auddec.start_pts_tb = is->audio_st->time_base;
2692  }
2693  if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
2694  goto out;
2695  SDL_PauseAudioDevice(audio_dev, 0);
2696  break;
2697  case AVMEDIA_TYPE_VIDEO:
2698  is->video_stream = stream_index;
2699  is->video_st = ic->streams[stream_index];
2700 
2701  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2702  if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
2703  goto out;
2704  is->queue_attachments_req = 1;
2705  break;
2706  case AVMEDIA_TYPE_SUBTITLE:
2707  is->subtitle_stream = stream_index;
2708  is->subtitle_st = ic->streams[stream_index];
2709 
2710  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2711  if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
2712  goto out;
2713  break;
2714  default:
2715  break;
2716  }
2717  goto out;
2718 
2719 fail:
2720  avcodec_free_context(&avctx);
2721 out:
2722  av_dict_free(&opts);
2723 
2724  return ret;
2725 }
2726 
2727 static int decode_interrupt_cb(void *ctx)
2728 {
2729  VideoState *is = ctx;
2730  return is->abort_request;
2731 }
2732 
2733 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2734  return stream_id < 0 ||
2735  queue->abort_request ||
2736  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2737  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2738 }
2739 
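/* In words: a stream counts as sufficiently buffered once it is disabled,
 * aborted, an attached picture, or has more than MIN_FRAMES (25) packets queued
 * covering more than one second of stream time. read_thread uses this together
 * with MAX_QUEUE_SIZE to decide when to stop reading ahead. */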
2740 static int is_realtime(AVFormatContext *s)
2741 {
2742  if( !strcmp(s->iformat->name, "rtp")
2743  || !strcmp(s->iformat->name, "rtsp")
2744  || !strcmp(s->iformat->name, "sdp")
2745  )
2746  return 1;
2747 
2748  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2749  || !strncmp(s->url, "udp:", 4)
2750  )
2751  )
2752  return 1;
2753  return 0;
2754 }
2755 
2756 /* this thread gets the stream from the disk or the network */
2757 static int read_thread(void *arg)
2758 {
2759  VideoState *is = arg;
2760  AVFormatContext *ic = NULL;
2761  int err, i, ret;
2762  int st_index[AVMEDIA_TYPE_NB];
2763  AVPacket pkt1, *pkt = &pkt1;
2764  int64_t stream_start_time;
2765  int pkt_in_play_range = 0;
2766  AVDictionaryEntry *t;
2767  SDL_mutex *wait_mutex = SDL_CreateMutex();
2768  int scan_all_pmts_set = 0;
2769  int64_t pkt_ts;
2770 
2771  if (!wait_mutex) {
2772  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2773  ret = AVERROR(ENOMEM);
2774  goto fail;
2775  }
2776 
2777  memset(st_index, -1, sizeof(st_index));
2778  is->eof = 0;
2779 
2780  ic = avformat_alloc_context();
2781  if (!ic) {
2782  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2783  ret = AVERROR(ENOMEM);
2784  goto fail;
2785  }
2786  ic->interrupt_callback.callback = decode_interrupt_cb;
2787  ic->interrupt_callback.opaque = is;
2788  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2789  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2790  scan_all_pmts_set = 1;
2791  }
2792  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2793  if (err < 0) {
2794  print_error(is->filename, err);
2795  ret = -1;
2796  goto fail;
2797  }
2798  if (scan_all_pmts_set)
2799  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2800 
2801  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2802  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2803  ret = AVERROR_OPTION_NOT_FOUND;
2804  goto fail;
2805  }
2806  is->ic = ic;
2807 
2808  if (genpts)
2809  ic->flags |= AVFMT_FLAG_GENPTS;
2810 
2811  av_format_inject_global_side_data(ic);
2812 
2813  if (find_stream_info) {
2814  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2815  int orig_nb_streams = ic->nb_streams;
2816 
2817  err = avformat_find_stream_info(ic, opts);
2818 
2819  for (i = 0; i < orig_nb_streams; i++)
2820  av_dict_free(&opts[i]);
2821  av_freep(&opts);
2822 
2823  if (err < 0) {
2824  av_log(NULL, AV_LOG_WARNING,
2825  "%s: could not find codec parameters\n", is->filename);
2826  ret = -1;
2827  goto fail;
2828  }
2829  }
2830 
2831  if (ic->pb)
2832  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2833 
2834  if (seek_by_bytes < 0)
2835  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2836 
2837  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2838 
2839  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2840  window_title = av_asprintf("%s - %s", t->value, input_filename);
2841 
2842  /* if seeking requested, we execute it */
2843  if (start_time != AV_NOPTS_VALUE) {
2844  int64_t timestamp;
2845 
2846  timestamp = start_time;
2847  /* add the stream start time */
2848  if (ic->start_time != AV_NOPTS_VALUE)
2849  timestamp += ic->start_time;
2850  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2851  if (ret < 0) {
2852  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2853  is->filename, (double)timestamp / AV_TIME_BASE);
2854  }
2855  }
2856 
2857  is->realtime = is_realtime(ic);
2858 
2859  if (show_status)
2860  av_dump_format(ic, 0, is->filename, 0);
2861 
2862  for (i = 0; i < ic->nb_streams; i++) {
2863  AVStream *st = ic->streams[i];
2864  enum AVMediaType type = st->codecpar->codec_type;
2865  st->discard = AVDISCARD_ALL;
2866  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2867  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2868  st_index[type] = i;
2869  }
2870  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2871  if (wanted_stream_spec[i] && st_index[i] == -1) {
2872  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2873  st_index[i] = INT_MAX;
2874  }
2875  }
2876 
2877  if (!video_disable)
2878  st_index[AVMEDIA_TYPE_VIDEO] =
2879  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2880  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2881  if (!audio_disable)
2882  st_index[AVMEDIA_TYPE_AUDIO] =
2883  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2884  st_index[AVMEDIA_TYPE_AUDIO],
2885  st_index[AVMEDIA_TYPE_VIDEO],
2886  NULL, 0);
2887  if (!subtitle_disable)
2888  st_index[AVMEDIA_TYPE_SUBTITLE] =
2889  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2890  st_index[AVMEDIA_TYPE_SUBTITLE],
2891  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2892  st_index[AVMEDIA_TYPE_AUDIO] :
2893  st_index[AVMEDIA_TYPE_VIDEO]),
2894  NULL, 0);
2895 
2896  is->show_mode = show_mode;
2897  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2898  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2899  AVCodecParameters *codecpar = st->codecpar;
2900  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2901  if (codecpar->width)
2902  set_default_window_size(codecpar->width, codecpar->height, sar);
2903  }
2904 
2905  /* open the streams */
2906  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2907  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2908  }
2909 
2910  ret = -1;
2911  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2912  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2913  }
2914  if (is->show_mode == SHOW_MODE_NONE)
2915  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2916 
2917  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2918  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2919  }
2920 
2921  if (is->video_stream < 0 && is->audio_stream < 0) {
2922  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2923  is->filename);
2924  ret = -1;
2925  goto fail;
2926  }
2927 
2928  if (infinite_buffer < 0 && is->realtime)
2929  infinite_buffer = 1;
2930 
2931  for (;;) {
2932  if (is->abort_request)
2933  break;
2934  if (is->paused != is->last_paused) {
2935  is->last_paused = is->paused;
2936  if (is->paused)
2937  is->read_pause_return = av_read_pause(ic);
2938  else
2939  av_read_play(ic);
2940  }
2941 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2942  if (is->paused &&
2943  (!strcmp(ic->iformat->name, "rtsp") ||
2944  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2945  /* wait 10 ms to avoid trying to get another packet */
2946  /* XXX: horrible */
2947  SDL_Delay(10);
2948  continue;
2949  }
2950 #endif
2951  if (is->seek_req) {
2952  int64_t seek_target = is->seek_pos;
2953  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2954  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2955 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2956 // of the seek_pos/seek_rel variables
2957 
2958  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2959  if (ret < 0) {
2960  av_log(NULL, AV_LOG_ERROR,
2961  "%s: error while seeking\n", is->ic->url);
2962  } else {
2963  if (is->audio_stream >= 0) {
2964  packet_queue_flush(&is->audioq);
2965  packet_queue_put(&is->audioq, &flush_pkt);
2966  }
2967  if (is->subtitle_stream >= 0) {
2968  packet_queue_flush(&is->subtitleq);
2969  packet_queue_put(&is->subtitleq, &flush_pkt);
2970  }
2971  if (is->video_stream >= 0) {
2972  packet_queue_flush(&is->videoq);
2973  packet_queue_put(&is->videoq, &flush_pkt);
2974  }
2975  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2976  set_clock(&is->extclk, NAN, 0);
2977  } else {
2978  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2979  }
2980  }
2981  is->seek_req = 0;
2982  is->queue_attachments_req = 1;
2983  is->eof = 0;
2984  if (is->paused)
2985  step_to_next_frame(is);
2986  }
2987  if (is->queue_attachments_req) {
2988  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2989  AVPacket copy;
2990  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2991  goto fail;
2992  packet_queue_put(&is->videoq, &copy);
2993  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2994  }
2995  is->queue_attachments_req = 0;
2996  }
2997 
2998  /* if the queues are full, no need to read more */
2999  if (infinite_buffer<1 &&
3000  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3001  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
3002  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
3003  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
3004  /* wait 10 ms */
3005  SDL_LockMutex(wait_mutex);
3006  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3007  SDL_UnlockMutex(wait_mutex);
3008  continue;
3009  }
3010  if (!is->paused &&
3011  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3012  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3013  if (loop != 1 && (!loop || --loop)) {
3014  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3015  } else if (autoexit) {
3016  ret = AVERROR_EOF;
3017  goto fail;
3018  }
3019  }
3020  ret = av_read_frame(ic, pkt);
3021  if (ret < 0) {
3022  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3023  if (is->video_stream >= 0)
3024  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3025  if (is->audio_stream >= 0)
3026  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3027  if (is->subtitle_stream >= 0)
3028  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3029  is->eof = 1;
3030  }
3031  if (ic->pb && ic->pb->error)
3032  break;
3033  SDL_LockMutex(wait_mutex);
3034  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3035  SDL_UnlockMutex(wait_mutex);
3036  continue;
3037  } else {
3038  is->eof = 0;
3039  }
3040  /* check if packet is in play range specified by user, then queue, otherwise discard */
3041  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3042  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3043  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3044  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3045  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3046  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3047  <= ((double)duration / 1000000);
3048  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3049  packet_queue_put(&is->audioq, pkt);
3050  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3051  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3052  packet_queue_put(&is->videoq, pkt);
3053  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3054  packet_queue_put(&is->subtitleq, pkt);
3055  } else {
3056  av_packet_unref(pkt);
3057  }
3058  }
3059 
3060  ret = 0;
3061  fail:
3062  if (ic && !is->ic)
3063  avformat_close_input(&ic);
3064 
3065  if (ret != 0) {
3066  SDL_Event event;
3067 
3068  event.type = FF_QUIT_EVENT;
3069  event.user.data1 = is;
3070  SDL_PushEvent(&event);
3071  }
3072  SDL_DestroyMutex(wait_mutex);
3073  return 0;
3074 }
3075 
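/* Summary of the loop above: read_thread demuxes packets and routes them to the
 * audio/video/subtitle packet queues, waiting on wait_mutex/continue_read_thread
 * when the queues hold more than MAX_QUEUE_SIZE bytes (15 MiB) or "enough"
 * packets, flushing the queues (and pushing flush_pkt) on seek requests, and
 * signalling EOF by queueing null packets so the decoders can drain. */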
3076 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3077 {
3078  VideoState *is;
3079 
3080  is = av_mallocz(sizeof(VideoState));
3081  if (!is)
3082  return NULL;
3083  is->last_video_stream = is->video_stream = -1;
3084  is->last_audio_stream = is->audio_stream = -1;
3085  is->last_subtitle_stream = is->subtitle_stream = -1;
3086  is->filename = av_strdup(filename);
3087  if (!is->filename)
3088  goto fail;
3089  is->iformat = iformat;
3090  is->ytop = 0;
3091  is->xleft = 0;
3092 
3093  /* start video display */
3094  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3095  goto fail;
3096  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3097  goto fail;
3098  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3099  goto fail;
3100 
3101  if (packet_queue_init(&is->videoq) < 0 ||
3102  packet_queue_init(&is->audioq) < 0 ||
3103  packet_queue_init(&is->subtitleq) < 0)
3104  goto fail;
3105 
3106  if (!(is->continue_read_thread = SDL_CreateCond())) {
3107  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3108  goto fail;
3109  }
3110 
3111  init_clock(&is->vidclk, &is->videoq.serial);
3112  init_clock(&is->audclk, &is->audioq.serial);
3113  init_clock(&is->extclk, &is->extclk.serial);
3114  is->audio_clock_serial = -1;
3115  if (startup_volume < 0)
3116  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3117  if (startup_volume > 100)
3118  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3119  startup_volume = av_clip(startup_volume, 0, 100);
3120  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3121  is->audio_volume = startup_volume;
3122  is->muted = 0;
3123  is->av_sync_type = av_sync_type;
3124  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3125  if (!is->read_tid) {
3126  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3127 fail:
3128  stream_close(is);
3129  return NULL;
3130  }
3131  return is;
3132 }
3133 
3134 static void stream_cycle_channel(VideoState *is, int codec_type)
3135 {
3136  AVFormatContext *ic = is->ic;
3137  int start_index, stream_index;
3138  int old_index;
3139  AVStream *st;
3140  AVProgram *p = NULL;
3141  int nb_streams = is->ic->nb_streams;
3142 
3143  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3144  start_index = is->last_video_stream;
3145  old_index = is->video_stream;
3146  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3147  start_index = is->last_audio_stream;
3148  old_index = is->audio_stream;
3149  } else {
3150  start_index = is->last_subtitle_stream;
3151  old_index = is->subtitle_stream;
3152  }
3153  stream_index = start_index;
3154 
3155  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3156  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3157  if (p) {
3158  nb_streams = p->nb_stream_indexes;
3159  for (start_index = 0; start_index < nb_streams; start_index++)
3160  if (p->stream_index[start_index] == stream_index)
3161  break;
3162  if (start_index == nb_streams)
3163  start_index = -1;
3164  stream_index = start_index;
3165  }
3166  }
3167 
3168  for (;;) {
3169  if (++stream_index >= nb_streams)
3170  {
3171  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3172  {
3173  stream_index = -1;
3174  is->last_subtitle_stream = -1;
3175  goto the_end;
3176  }
3177  if (start_index == -1)
3178  return;
3179  stream_index = 0;
3180  }
3181  if (stream_index == start_index)
3182  return;
3183  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3184  if (st->codecpar->codec_type == codec_type) {
3185  /* check that parameters are OK */
3186  switch (codec_type) {
3187  case AVMEDIA_TYPE_AUDIO:
3188  if (st->codecpar->sample_rate != 0 &&
3189  st->codecpar->channels != 0)
3190  goto the_end;
3191  break;
3192  case AVMEDIA_TYPE_VIDEO:
3193  case AVMEDIA_TYPE_SUBTITLE:
3194  goto the_end;
3195  default:
3196  break;
3197  }
3198  }
3199  }
3200  the_end:
3201  if (p && stream_index != -1)
3202  stream_index = p->stream_index[stream_index];
3203  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3204  av_get_media_type_string(codec_type),
3205  old_index,
3206  stream_index);
3207 
3208  stream_component_close(is, old_index);
3209  stream_component_open(is, stream_index);
3210 }
3211 
3212 
3213 static void toggle_full_screen(VideoState *is)
3214 {
3215  is_full_screen = !is_full_screen;
3216  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3217 }
3218 
3219 static void toggle_audio_display(VideoState *is)
3220 {
3221  int next = is->show_mode;
3222  do {
3223  next = (next + 1) % SHOW_MODE_NB;
3224  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3225  if (is->show_mode != next) {
3226  is->force_refresh = 1;
3227  is->show_mode = next;
3228  }
3229 }
3230 
3231 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3232  double remaining_time = 0.0;
3233  SDL_PumpEvents();
3234  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3235  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3236  SDL_ShowCursor(0);
3237  cursor_hidden = 1;
3238  }
3239  if (remaining_time > 0.0)
3240  av_usleep((int64_t)(remaining_time * 1000000.0));
3241  remaining_time = REFRESH_RATE;
3242  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3243  video_refresh(is, &remaining_time);
3244  SDL_PumpEvents();
3245  }
3246 }
3247 
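/* The loop above polls for SDL events while redrawing: it sleeps for at most
 * REFRESH_RATE (0.01 s) between iterations, lets video_refresh() shrink that
 * sleep when the next frame is due sooner, and hides the mouse cursor after
 * CURSOR_HIDE_DELAY of inactivity. */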
3248 static void seek_chapter(VideoState *is, int incr)
3249 {
3250  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3251  int i;
3252 
3253  if (!is->ic->nb_chapters)
3254  return;
3255 
3256  /* find the current chapter */
3257  for (i = 0; i < is->ic->nb_chapters; i++) {
3258  AVChapter *ch = is->ic->chapters[i];
3259  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3260  i--;
3261  break;
3262  }
3263  }
3264 
3265  i += incr;
3266  i = FFMAX(i, 0);
3267  if (i >= is->ic->nb_chapters)
3268  return;
3269 
3270  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3271  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3272  AV_TIME_BASE_Q), 0, 0);
3273 }
3274 
3275 /* handle an event sent by the GUI */
3276 static void event_loop(VideoState *cur_stream)
3277 {
3278  SDL_Event event;
3279  double incr, pos, frac;
3280 
3281  for (;;) {
3282  double x;
3283  refresh_loop_wait_event(cur_stream, &event);
3284  switch (event.type) {
3285  case SDL_KEYDOWN:
3286  if (exit_on_keydown || event.key.keysym.sym == SDLK_ESCAPE || event.key.keysym.sym == SDLK_q) {
3287  do_exit(cur_stream);
3288  break;
3289  }
3290  // If we don't yet have a window, skip all key events, because read_thread might still be initializing...
3291  if (!cur_stream->width)
3292  continue;
3293  switch (event.key.keysym.sym) {
3294  case SDLK_f:
3295  toggle_full_screen(cur_stream);
3296  cur_stream->force_refresh = 1;
3297  break;
3298  case SDLK_p:
3299  case SDLK_SPACE:
3300  toggle_pause(cur_stream);
3301  break;
3302  case SDLK_m:
3303  toggle_mute(cur_stream);
3304  break;
3305  case SDLK_KP_MULTIPLY:
3306  case SDLK_0:
3307  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3308  break;
3309  case SDLK_KP_DIVIDE:
3310  case SDLK_9:
3311  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3312  break;
3313  case SDLK_s: // S: Step to next frame
3314  step_to_next_frame(cur_stream);
3315  break;
3316  case SDLK_a:
3317  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3318  break;
3319  case SDLK_v:
3320  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3321  break;
3322  case SDLK_c:
3323  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3324  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3325  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3326  break;
3327  case SDLK_t:
3328  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3329  break;
3330  case SDLK_w:
3331 #if CONFIG_AVFILTER
3332  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3333  if (++cur_stream->vfilter_idx >= nb_vfilters)
3334  cur_stream->vfilter_idx = 0;
3335  } else {
3336  cur_stream->vfilter_idx = 0;
3337  toggle_audio_display(cur_stream);
3338  }
3339 #else
3340  toggle_audio_display(cur_stream);
3341 #endif
3342  break;
3343  case SDLK_PAGEUP:
3344  if (cur_stream->ic->nb_chapters <= 1) {
3345  incr = 600.0;
3346  goto do_seek;
3347  }
3348  seek_chapter(cur_stream, 1);
3349  break;
3350  case SDLK_PAGEDOWN:
3351  if (cur_stream->ic->nb_chapters <= 1) {
3352  incr = -600.0;
3353  goto do_seek;
3354  }
3355  seek_chapter(cur_stream, -1);
3356  break;
3357  case SDLK_LEFT:
3358  incr = seek_interval ? -seek_interval : -10.0;
3359  goto do_seek;
3360  case SDLK_RIGHT:
3361  incr = seek_interval ? seek_interval : 10.0;
3362  goto do_seek;
3363  case SDLK_UP:
3364  incr = 60.0;
3365  goto do_seek;
3366  case SDLK_DOWN:
3367  incr = -60.0;
3368  do_seek:
3369  if (seek_by_bytes) {
3370  pos = -1;
3371  if (pos < 0 && cur_stream->video_stream >= 0)
3372  pos = frame_queue_last_pos(&cur_stream->pictq);
3373  if (pos < 0 && cur_stream->audio_stream >= 0)
3374  pos = frame_queue_last_pos(&cur_stream->sampq);
3375  if (pos < 0)
3376  pos = avio_tell(cur_stream->ic->pb);
3377  if (cur_stream->ic->bit_rate)
3378  incr *= cur_stream->ic->bit_rate / 8.0;
3379  else
3380  incr *= 180000.0;
3381  pos += incr;
3382  stream_seek(cur_stream, pos, incr, 1);
3383  } else {
3384  pos = get_master_clock(cur_stream);
3385  if (isnan(pos))
3386  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3387  pos += incr;
3388  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3389  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3390  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3391  }
3392  break;
3393  default:
3394  break;
3395  }
3396  break;
3397  case SDL_MOUSEBUTTONDOWN:
3398  if (exit_on_mousedown) {
3399  do_exit(cur_stream);
3400  break;
3401  }
3402  if (event.button.button == SDL_BUTTON_LEFT) {
3403  static int64_t last_mouse_left_click = 0;
3404  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3405  toggle_full_screen(cur_stream);
3406  cur_stream->force_refresh = 1;
3407  last_mouse_left_click = 0;
3408  } else {
3409  last_mouse_left_click = av_gettime_relative();
3410  }
3411  }
3412  case SDL_MOUSEMOTION:
3413  if (cursor_hidden) {
3414  SDL_ShowCursor(1);
3415  cursor_hidden = 0;
3416  }
3417  cursor_last_shown = av_gettime_relative();
3418  if (event.type == SDL_MOUSEBUTTONDOWN) {
3419  if (event.button.button != SDL_BUTTON_RIGHT)
3420  break;
3421  x = event.button.x;
3422  } else {
3423  if (!(event.motion.state & SDL_BUTTON_RMASK))
3424  break;
3425  x = event.motion.x;
3426  }
3427  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3428  uint64_t size = avio_size(cur_stream->ic->pb);
3429  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3430  } else {
3431  int64_t ts;
3432  int ns, hh, mm, ss;
3433  int tns, thh, tmm, tss;
3434  tns = cur_stream->ic->duration / 1000000LL;
3435  thh = tns / 3600;
3436  tmm = (tns % 3600) / 60;
3437  tss = (tns % 60);
3438  frac = x / cur_stream->width;
3439  ns = frac * tns;
3440  hh = ns / 3600;
3441  mm = (ns % 3600) / 60;
3442  ss = (ns % 60);
3443  av_log(NULL, AV_LOG_INFO,
3444  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3445  hh, mm, ss, thh, tmm, tss);
3446  ts = frac * cur_stream->ic->duration;
3447  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3448  ts += cur_stream->ic->start_time;
3449  stream_seek(cur_stream, ts, 0, 0);
3450  }
3451  break;
3452  case SDL_WINDOWEVENT:
3453  switch (event.window.event) {
3454  case SDL_WINDOWEVENT_SIZE_CHANGED:
3455  screen_width = cur_stream->width = event.window.data1;
3456  screen_height = cur_stream->height = event.window.data2;
3457  if (cur_stream->vis_texture) {
3458  SDL_DestroyTexture(cur_stream->vis_texture);
3459  cur_stream->vis_texture = NULL;
3460  }
3461  case SDL_WINDOWEVENT_EXPOSED:
3462  cur_stream->force_refresh = 1;
3463  }
3464  break;
3465  case SDL_QUIT:
3466  case FF_QUIT_EVENT:
3467  do_exit(cur_stream);
3468  break;
3469  default:
3470  break;
3471  }
3472  }
3473 }
3474 
3475 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3476 {
3477  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3478  return opt_default(NULL, "video_size", arg);
3479 }
3480 
3481 static int opt_width(void *optctx, const char *opt, const char *arg)
3482 {
3483  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3484  return 0;
3485 }
3486 
3487 static int opt_height(void *optctx, const char *opt, const char *arg)
3488 {
3489  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3490  return 0;
3491 }
3492 
3493 static int opt_format(void *optctx, const char *opt, const char *arg)
3494 {
3495  file_iformat = av_find_input_format(arg);
3496  if (!file_iformat) {
3497  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3498  return AVERROR(EINVAL);
3499  }
3500  return 0;
3501 }
3502 
3503 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3504 {
3505  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3506  return opt_default(NULL, "pixel_format", arg);
3507 }
3508 
3509 static int opt_sync(void *optctx, const char *opt, const char *arg)
3510 {
3511  if (!strcmp(arg, "audio"))
3512  av_sync_type = AV_SYNC_AUDIO_MASTER;
3513  else if (!strcmp(arg, "video"))
3514  av_sync_type = AV_SYNC_VIDEO_MASTER;
3515  else if (!strcmp(arg, "ext"))
3516  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3517  else {
3518  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3519  exit(1);
3520  }
3521  return 0;
3522 }
3523 
3524 static int opt_seek(void *optctx, const char *opt, const char *arg)
3525 {
3526  start_time = parse_time_or_die(opt, arg, 1);
3527  return 0;
3528 }
3529 
3530 static int opt_duration(void *optctx, const char *opt, const char *arg)
3531 {
3532  duration = parse_time_or_die(opt, arg, 1);
3533  return 0;
3534 }
3535 
3536 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3537 {
3538  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3539  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3540  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3541  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3542  return 0;
3543 }
3544 
3545 static void opt_input_file(void *optctx, const char *filename)
3546 {
3547  if (input_filename) {
3548  av_log(NULL, AV_LOG_FATAL,
3549  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3550  filename, input_filename);
3551  exit(1);
3552  }
3553  if (!strcmp(filename, "-"))
3554  filename = "pipe:";
3555  input_filename = filename;
3556 }
3557 
3558 static int opt_codec(void *optctx, const char *opt, const char *arg)
3559 {
3560  const char *spec = strchr(opt, ':');
3561  if (!spec) {
3562  av_log(NULL, AV_LOG_ERROR,
3563  "No media specifier was specified in '%s' in option '%s'\n",
3564  arg, opt);
3565  return AVERROR(EINVAL);
3566  }
3567  spec++;
3568  switch (spec[0]) {
3569  case 'a' : audio_codec_name = arg; break;
3570  case 's' : subtitle_codec_name = arg; break;
3571  case 'v' : video_codec_name = arg; break;
3572  default:
3573  av_log(NULL, AV_LOG_ERROR,
3574  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3575  return AVERROR(EINVAL);
3576  }
3577  return 0;
3578 }
3579 
3580 static int dummy;
3581 
3582 static const OptionDef options[] = {
3583  CMDUTILS_COMMON_OPTIONS
3584  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3585  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3586  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3587  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3588  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3589  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3590  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3591  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3592  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3593  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3594  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3595  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3596  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3597  { "seek_interval", OPT_FLOAT | HAS_ARG, { &seek_interval }, "set seek interval for left/right keys, in seconds", "seconds" },
3598  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3599  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3600  { "alwaysontop", OPT_BOOL, { &alwaysontop }, "window always on top" },
3601  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3602  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3603  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3604  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3605  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3606  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3607  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3608  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3609  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3610  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3611  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3612  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3613  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3614  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3615  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3616  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3617  { "left", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_left }, "set the x position for the left of the window", "x pos" },
3618  { "top", OPT_INT | HAS_ARG | OPT_EXPERT, { &screen_top }, "set the y position for the top of the window", "y pos" },
3619 #if CONFIG_AVFILTER
3620  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3621  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3622 #endif
3623  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3624  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3625  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3626  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3627  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3628  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3629  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3630  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3631  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3632  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3633  "read and decode the streams to fill missing information with heuristics" },
3634  { "filter_threads", HAS_ARG | OPT_INT | OPT_EXPERT, { &filter_nbthreads }, "number of filter threads per graph" },
3635  { NULL, },
3636 };
3637 
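The table above follows the OptionDef convention from cmdutils.h: each entry names an option, combines flag bits (OPT_BOOL, OPT_INT, OPT_STRING, HAS_ARG, OPT_EXPERT, ...), and either points at a variable to fill or supplies a func_arg callback. A minimal sketch of such entries, using hypothetical option names and variables that are not part of ffplay, is shown below. OPT_BOOL options take no argument and set the int they point to; OPT_INT | HAS_ARG parses the following command-line argument into it.

/* Sketch only, not part of ffplay: hypothetical OptionDef entries. */
#include "cmdutils.h"

static int example_flag; /* filled in by -exflag       */
static int example_val;  /* filled in by -exval <val>  */

static const OptionDef example_options[] = {
    { "exflag", OPT_BOOL | OPT_EXPERT, { &example_flag },
      "toggle a hypothetical boolean switch" },
    { "exval", OPT_INT | HAS_ARG, { &example_val },
      "set a hypothetical integer value", "val" },
    { NULL, },
};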
3638 static void show_usage(void)
3639 {
3640  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3641  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3642  av_log(NULL, AV_LOG_INFO, "\n");
3643 }
3644 
3645 void show_help_default(const char *opt, const char *arg)
3646 {
3647  av_log_set_callback(log_callback_help);
3648  show_usage();
3649  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3650  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3651  printf("\n");
3652  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3653  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3654 #if !CONFIG_AVFILTER
3655  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3656 #else
3657  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3658 #endif
3659  printf("\nWhile playing:\n"
3660  "q, ESC quit\n"
3661  "f toggle full screen\n"
3662  "p, SPC pause\n"
3663  "m toggle mute\n"
3664  "9, 0 decrease and increase volume respectively\n"
3665  "/, * decrease and increase volume respectively\n"
3666  "a cycle audio channel in the current program\n"
3667  "v cycle video channel\n"
3668  "t cycle subtitle channel in the current program\n"
3669  "c cycle program\n"
3670  "w cycle video filters or show modes\n"
3671  "s activate frame-step mode\n"
3672  "left/right seek backward/forward 10 seconds or to custom interval if -seek_interval is set\n"
3673  "down/up seek backward/forward 1 minute\n"
3674  "page down/page up seek backward/forward 10 minutes\n"
3675  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3676  "left double-click toggle full screen\n"
3677  );
3678 }
3679 
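The key bindings listed above are handled elsewhere by ffplay's event_loop(). As an illustrative sketch only (not ffplay's actual code), the snippet below shows roughly how a few of these bindings could be dispatched with plain SDL2; it assumes SDL video is already initialized and a window exists so key events are delivered, and the handler comments are placeholders.

#include <SDL.h>

/* Dispatch a few of the keys documented above (illustrative only). */
static void example_key_loop(void)
{
    SDL_Event event;

    for (;;) {
        SDL_WaitEvent(&event);
        switch (event.type) {
        case SDL_KEYDOWN:
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                return;                          /* q, ESC: quit          */
            case SDLK_f:
                /* toggle full screen here */
                break;
            case SDLK_p:
            case SDLK_SPACE:
                /* pause/resume playback here */
                break;
            case SDLK_m:
                /* toggle mute here */
                break;
            default:
                break;
            }
            break;
        case SDL_QUIT:
            return;
        }
    }
}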
3680 /* Called from the main program */
3681 int main(int argc, char **argv)
3682 {
3683  int flags;
3684  VideoState *is;
3685 
3686  init_dynload();
3687 
3688  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3689  parse_loglevel(argc, argv, options);
3690 
3691  /* register all codecs, demuxers and protocols */
3692 #if CONFIG_AVDEVICE
3693  avdevice_register_all();
3694 #endif
3695  avformat_network_init();
3696 
3697  init_opts();
3698 
3699  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3700  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3701 
3702  show_banner(argc, argv, options);
3703 
3704  parse_options(NULL, argc, argv, options, opt_input_file);
3705 
3706  if (!input_filename) {
3707  show_usage();
3708  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3709  av_log(NULL, AV_LOG_FATAL,
3710  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3711  exit(1);
3712  }
3713 
3714  if (display_disable) {
3715  video_disable = 1;
3716  }
3717  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3718  if (audio_disable)
3719  flags &= ~SDL_INIT_AUDIO;
3720  else {
3721  /* Try to work around an occasional ALSA buffer underflow issue when the
3722  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3723  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3724  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3725  }
3726  if (display_disable)
3727  flags &= ~SDL_INIT_VIDEO;
3728  if (SDL_Init (flags)) {
3729  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3730  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3731  exit(1);
3732  }
3733 
3734  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3735  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3736 
3737  av_init_packet(&flush_pkt);
3738  flush_pkt.data = (uint8_t *)&flush_pkt;
3739 
3740  if (!display_disable) {
3741  int flags = SDL_WINDOW_HIDDEN;
3742  if (alwaysontop)
3743 #if SDL_VERSION_ATLEAST(2,0,5)
3744  flags |= SDL_WINDOW_ALWAYS_ON_TOP;
3745 #else
3746  av_log(NULL, AV_LOG_WARNING, "Your SDL version doesn't support SDL_WINDOW_ALWAYS_ON_TOP. Feature will be inactive.\n");
3747 #endif
3748  if (borderless)
3749  flags |= SDL_WINDOW_BORDERLESS;
3750  else
3751  flags |= SDL_WINDOW_RESIZABLE;
3752  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3753  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3754  if (window) {
3755  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3756  if (!renderer) {
3757  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3758  renderer = SDL_CreateRenderer(window, -1, 0);
3759  }
3760  if (renderer) {
3761  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3762  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3763  }
3764  }
3765  if (!window || !renderer || !renderer_info.num_texture_formats) {
3766  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3767  do_exit(NULL);
3768  }
3769  }
3770 
3771  is = stream_open(input_filename, file_iformat);
3772  if (!is) {
3773  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3774  do_exit(NULL);
3775  }
3776 
3777  event_loop(is);
3778 
3779  /* never returns */
3780 
3781  return 0;
3782 }
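main() above tries a hardware-accelerated, vsync-enabled renderer first and quietly falls back to whatever renderer SDL can provide. Below is a standalone sketch of that fallback pattern, with arbitrary example window parameters that are not taken from ffplay.

#include <stdio.h>
#include <SDL.h>

int main(int argc, char **argv)
{
    SDL_Window *win;
    SDL_Renderer *ren;
    SDL_RendererInfo info;

    (void)argc; (void)argv;

    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "SDL_Init failed: %s\n", SDL_GetError());
        return 1;
    }
    win = SDL_CreateWindow("renderer fallback demo",
                           SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (!win) {
        fprintf(stderr, "SDL_CreateWindow failed: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }
    /* Preferred path: accelerated renderer with vsync. */
    ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    if (!ren) {
        fprintf(stderr, "accelerated renderer unavailable (%s), falling back\n", SDL_GetError());
        ren = SDL_CreateRenderer(win, -1, 0); /* accept any renderer */
    }
    if (ren && !SDL_GetRendererInfo(ren, &info))
        printf("using %s renderer\n", info.name);

    if (ren)
        SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return ren ? 0 : 1;
}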
OPT_FLOAT
#define OPT_FLOAT
Definition: cmdutils.h:168
AVSubtitle
Definition: avcodec.h:2694
rect::w
int w
Definition: f_ebur128.c:91
sws_getCachedContext
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2379
do_exit
static void do_exit(VideoState *is)
Definition: ffplay.c:1302
VideoState::seek_rel
int64_t seek_rel
Definition: ffplay.c:215
av_packet_unref
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
AVCodec
AVCodec.
Definition: codec.h:190
opt_frame_size
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3475
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
VideoState::video_st
AVStream * video_st
Definition: ffplay.c:285
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
Frame::width
int width
Definition: ffplay.c:162
AVFMT_NO_BYTE_SEEK
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:470
AudioParams::fmt
enum AVSampleFormat fmt
Definition: ffplay.c:139
av_sync_type
static int av_sync_type
Definition: ffplay.c:331
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
opt.h
renderer_info
static SDL_RendererInfo renderer_info
Definition: ffplay.c:370
configure_filtergraph
int configure_filtergraph(FilterGraph *fg)
Definition: ffmpeg_filter.c:1011
frame_queue_nb_remaining
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:807
frame_queue_next
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:791
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
Decoder::finished
int finished
Definition: ffplay.c:194
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:869
AVCodecContext::channel_layout
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVProgram::nb_stream_indexes
unsigned int nb_stream_indexes
Definition: avformat.h:1262
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
frame_queue_last_pos
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:813
out
FILE * out
Definition: movenc.c:54
AVCodecContext::sample_rate
int sample_rate
samples per second
Definition: avcodec.h:1186
FFSWAP
#define FFSWAP(type, a, b)
Definition: common.h:99
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
video_thread
static int video_thread(void *arg)
Definition: ffplay.c:2135
VideoState::av_sync_type
int av_sync_type
Definition: ffplay.c:234
is
The official guide to swscale for confused that is
Definition: swscale.txt:28
set_default_window_size
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1328
AV_PIX_FMT_BGR32
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:372
AV_NOSYNC_THRESHOLD
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:87
AVFormatContext::nb_chapters
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1564
AVCodecParameters
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
AV_LOG_QUIET
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
FrameQueue::keep_last
int keep_last
Definition: ffplay.c:176
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:716
VideoState::audio_hw_buf_size
int audio_hw_buf_size
Definition: ffplay.c:244
decoder_decode_frame
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:591
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:55
AV_DISPOSITION_ATTACHED_PIC
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:833
FrameQueue::cond
SDL_cond * cond
Definition: ffplay.c:179
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:117
AVStream::discard
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:920
av_find_program_from_stream
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4234
display_disable
static int display_disable
Definition: ffplay.c:326
screen_width
static int screen_width
Definition: ffplay.c:316
DEBUG
#define DEBUG
Definition: vf_framerate.c:29
sws_dict
AVDictionary * sws_dict
Definition: cmdutils.c:68
swr_set_compensation
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:897
SAMPLE_ARRAY_SIZE
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:105
rect
Definition: f_ebur128.c:91
update_volume
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1514
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
VideoState::auddec
Decoder auddec
Definition: ffplay.c:228
screen_left
static int screen_left
Definition: ffplay.c:318
av_opt_set_int_list
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:712
AudioParams::frame_size
int frame_size
Definition: ffplay.c:140
AVSubtitleRect
Definition: avcodec.h:2659
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
Decoder::next_pts
int64_t next_pts
Definition: ffplay.c:199
decoder_start
static int decoder_start(Decoder *d, int(*fn)(void *), const char *thread_name, void *arg)
Definition: ffplay.c:2124
rect::y
int y
Definition: f_ebur128.c:91
FrameQueue::size
int size
Definition: ffplay.c:174
avformat_get_class
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:170
av_unused
#define av_unused
Definition: attributes.h:131
Frame::sar
AVRational sar
Definition: ffplay.c:165
AudioParams::channel_layout
int64_t channel_layout
Definition: ffplay.c:138
out_size
int out_size
Definition: movenc.c:55
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
upload_texture
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:911
end
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
VideoState::vis_texture
SDL_Texture * vis_texture
Definition: ffplay.c:273
queue_picture
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1747
av_get_channel_layout_string
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Definition: channel_layout.c:211
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
AudioParams
Definition: ffplay.c:135
VideoState::subtitle_st
AVStream * subtitle_st
Definition: ffplay.c:278
pixdesc.h
AVFormatContext::streams
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1403
step
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
Definition: rate_distortion.txt:58
VideoState::frame_last_filter_delay
double frame_last_filter_delay
Definition: ffplay.c:283
AVFrame::width
int width
Definition: frame.h:358
VideoState::xleft
int xleft
Definition: ffplay.c:293
Frame::pts
double pts
Definition: ffplay.c:159
w
uint8_t w
Definition: llviddspenc.c:38
AVCOL_RANGE_JPEG
@ AVCOL_RANGE_JPEG
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
OPT_INPUT
#define OPT_INPUT
Definition: cmdutils.h:178
avcodec_decode_subtitle2
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:978
frame_queue_init
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:694
subtitle_codec_name
static const char * subtitle_codec_name
Definition: ffplay.c:346
AVPacket::data
uint8_t * data
Definition: packet.h:355
EXTERNAL_CLOCK_MIN_FRAMES
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:69
HAS_ARG
#define HAS_ARG
Definition: cmdutils.h:161
b
#define b
Definition: input.c:41
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
AVChapter::start
int64_t start
Definition: avformat.h:1295
Clock
Definition: ffplay.c:144
data
const char data[16]
Definition: mxf.c:91
frame_queue_destory
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:715
SAMPLE_QUEUE_SIZE
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:132
program_name
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:64
Decoder::queue
PacketQueue * queue
Definition: ffplay.c:191
format_opts
AVDictionary * format_opts
Definition: cmdutils.c:70
AVSEEK_FLAG_BYTE
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2497
AVIOContext::error
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
AV_DICT_IGNORE_SUFFIX
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key,...
Definition: dict.h:70
packet_queue_put_nullpacket
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:471
AV_PIX_FMT_RGB32_1
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:371
VideoState::audio_diff_avg_coef
double audio_diff_avg_coef
Definition: ffplay.c:239
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
CURSOR_HIDE_DELAY
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:107
SDL_VOLUME_STEP
#define SDL_VOLUME_STEP
Definition: ffplay.c:78
show_help_children
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:203
AV_PIX_FMT_BGR24
@ AV_PIX_FMT_BGR24
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:69
AV_PIX_FMT_BGRA
@ AV_PIX_FMT_BGRA
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:95
autorotate
static int autorotate
Definition: ffplay.c:356
AVPacket::duration
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:373
TextureFormatEntry::texture_fmt
int texture_fmt
Definition: ffplay.c:375
video_disable
static int video_disable
Definition: ffplay.c:321
Frame::uploaded
int uploaded
Definition: ffplay.c:166
mathematics.h
sws_scale
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:744
AVDictionary
Definition: dict.c:30
compute_target_delay
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1529
decoder_init
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:582
Frame
Definition: ffplay.c:155
stream_close
static void stream_close(VideoState *is)
Definition: ffplay.c:1265
av_read_frame
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1780
SDL_AUDIO_MAX_CALLBACKS_PER_SEC
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:75
AVFMT_NOBINSEARCH
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:468
VideoState::paused
int paused
Definition: ffplay.c:209
init_clock
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1410
codec_type
enum AVMediaType codec_type
Definition: rtp.c:37
AV_OPT_FLAG_FILTERING_PARAM
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:292
opt_seek
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3524
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
VideoState::iformat
AVInputFormat * iformat
Definition: ffplay.c:206
get_master_clock
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1443
VideoState::width
int width
Definition: ffplay.c:293
sample_rate
sample_rate
Definition: ffmpeg_filter.c:192
dummy
static int dummy
Definition: ffplay.c:3580
update_video_pts
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1571
FF_QUIT_EVENT
#define FF_QUIT_EVENT
Definition: ffplay.c:366
PacketQueue
Definition: ffplay.c:119
subtitle_thread
static int subtitle_thread(void *arg)
Definition: ffplay.c:2242
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
VideoState::last_subtitle_stream
int last_subtitle_stream
Definition: ffplay.c:305
VideoState::SHOW_MODE_NONE
@ SHOW_MODE_NONE
Definition: ffplay.c:263
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:31
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:120
OptionDef
Definition: cmdutils.h:158
audio_decode_frame
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2345
subtitle_disable
static int subtitle_disable
Definition: ffplay.c:322
VideoState::pictq
FrameQueue pictq
Definition: ffplay.c:224
genpts
static int genpts
Definition: ffplay.c:335
VideoState::swr_ctx
struct SwrContext * swr_ctx
Definition: ffplay.c:258
opt_sync
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3509
step_to_next_frame
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1521
VideoState::sampq
FrameQueue sampq
Definition: ffplay.c:226
TextureFormatEntry::format
enum AVPixelFormat format
Definition: ffplay.c:374
FrameQueue::rindex
int rindex
Definition: ffplay.c:172
video_display
static void video_display(VideoState *is)
Definition: ffplay.c:1364
AVCodec::max_lowres
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: codec.h:215
avformat_close_input
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4498
parse_number_or_die
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:141
AVCOL_SPC_BT470BG
@ AVCOL_SPC_BT470BG
also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
Definition: pixfmt.h:515
AVFormatContext::interrupt_callback
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1613
avcodec_find_decoder_by_name
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:947
SDL_AUDIO_MIN_BUFFER_SIZE
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:73
print_error
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1085
startup_volume
static int startup_volume
Definition: ffplay.c:329
window
static SDL_Window * window
Definition: ffplay.c:368
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:142
AVCodecParameters::channels
int channels
Audio only.
Definition: codec_par.h:166
toggle_full_screen
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3213
Clock::queue_serial
int * queue_serial
Definition: ffplay.c:151
AVFormatContext::iformat
ff_const59 struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1347
VideoState::extclk
Clock extclk
Definition: ffplay.c:222
VideoState::seek_flags
int seek_flags
Definition: ffplay.c:213
alwaysontop
static int alwaysontop
Definition: ffplay.c:328
VideoState::audio_st
AVStream * audio_st
Definition: ffplay.c:242
packet_queue_init
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:482
AUDIO_DIFF_AVG_NB
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:98
fail
#define fail()
Definition: checkasm.h:123
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
opt_duration
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3530
FrameQueue
Definition: ffplay.c:170
packet_queue_put
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:457
samplefmt.h
AVSubtitleRect::x
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2660
VideoState::video_stream
int video_stream
Definition: ffplay.c:284
autoexit
static int autoexit
Definition: ffplay.c:338
avio_tell
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
AVChapter
Definition: avformat.h:1292
video_image_display
static void video_image_display(VideoState *is)
Definition: ffplay.c:978
val
static double val(void *priv, double ch)
Definition: aeval.c:76
AV_SYNC_AUDIO_MASTER
@ AV_SYNC_AUDIO_MASTER
Definition: ffplay.c:184
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
opt_show_mode
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3536
Decoder::empty_queue_cond
SDL_cond * empty_queue_cond
Definition: ffplay.c:196
pts
static int64_t pts
Definition: transcode_aac.c:647
set_clock_speed
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1404
VideoState::audio_diff_threshold
double audio_diff_threshold
Definition: ffplay.c:240
sws_get_class
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:465
VideoState::audio_diff_cum
double audio_diff_cum
Definition: ffplay.c:238
VideoState::last_video_stream
int last_video_stream
Definition: ffplay.c:305
OPT_STRING
#define OPT_STRING
Definition: cmdutils.h:164
ss
#define ss(width, name, subs,...)
Definition: cbs_vp9.c:261
fast
static int fast
Definition: ffplay.c:334
fn
#define fn(a)
Definition: colorspacedsp_template.c:42
loop
static int loop
Definition: ffplay.c:341
AVRational::num
int num
Numerator.
Definition: rational.h:59
avformat_network_init
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5059
VideoState::rdft_bits
int rdft_bits
Definition: ffplay.c:269
opt_height
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3487
avsubtitle_free
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1124
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
sdl_texture_format_map
static const struct TextureFormatEntry sdl_texture_format_map[]
AVFormatContext::bit_rate
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1457
is_full_screen
static int is_full_screen
Definition: ffplay.c:361
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
avassert.h
set_sdl_yuv_conversion_mode
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
Definition: ffplay.c:962
lrint
#define lrint
Definition: tablegen.h:53
Frame::flip_v
int flip_v
Definition: ffplay.c:167
av_guess_sample_aspect_ratio
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio.
Definition: utils.c:5122
AV_LOG_TRACE
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
AVFormatContext::metadata
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1575
AVInputFormat
Definition: avformat.h:636
audio_thread
static int audio_thread(void *arg)
Definition: ffplay.c:2037
set_clock
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1398
av_dump_format
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate,...
Definition: dump.c:600
VideoState
Definition: ffplay.c:204
frame_queue_peek_next
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:739
sdl_audio_callback
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2458
sync_clock_to_slave
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1418
swr_init
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
opt_input_file
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3545
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
frame_queue_signal
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:727
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
filter_codec_opts
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2104
OPT_INT
#define OPT_INT
Definition: cmdutils.h:167
VideoState::img_convert_ctx
struct SwsContext * img_convert_ctx
Definition: ffplay.c:288
avcodec_alloc_context3
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
width
#define width
VideoState::ShowMode
ShowMode
Definition: ffplay.c:262
Decoder::avctx
AVCodecContext * avctx
Definition: ffplay.c:192
s
#define s(width, name)
Definition: cbs_vp9.c:257
AV_OPT_FLAG_ENCODING_PARAM
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:276
show_help_default
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3645
AV_CEIL_RSHIFT
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
default_height
static int default_height
Definition: ffplay.c:315
AVFormatContext::flags
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1466
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
AVCodecParameters::sample_aspect_ratio
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
AVFrame::channels
int channels
number of audio channels, only used for audio.
Definition: frame.h:606
AVInputFormat::name
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:641
AV_PIX_FMT_0BGR32
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:375
AVCOL_SPC_SMPTE170M
@ AVCOL_SPC_SMPTE170M
also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
Definition: pixfmt.h:516
AVDictionaryEntry::key
char * key
Definition: dict.h:82
Clock::last_updated
double last_updated
Definition: ffplay.c:147
PacketQueue::duration
int64_t duration
Definition: ffplay.c:123
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
AVSubtitleRect::y
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2661
AVCodecParameters::width
int width
Video only.
Definition: codec_par.h:126
calculate_display_rect
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:866
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
screen_height
static int screen_height
Definition: ffplay.c:317
EXTERNAL_CLOCK_SPEED_STEP
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:95
AV_CH_LAYOUT_STEREO_DOWNMIX
#define AV_CH_LAYOUT_STEREO_DOWNMIX
Definition: channel_layout.h:112
Decoder::pkt_serial
int pkt_serial
Definition: ffplay.c:193
opt_frame_pix_fmt
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3503
avcodec_receive_frame
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:649
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
AVMEDIA_TYPE_NB
@ AVMEDIA_TYPE_NB
Definition: avutil.h:206
outputs
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
AV_SYNC_EXTERNAL_CLOCK
@ AV_SYNC_EXTERNAL_CLOCK
Definition: ffplay.c:186
av_read_play
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4319
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
opt_codec
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3558
Clock::pts_drift
double pts_drift
Definition: ffplay.c:146
swr_convert
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:714
VideoState::videoq
PacketQueue videoq
Definition: ffplay.c:286
ctx
AVFormatContext * ctx
Definition: movenc.c:48
channels
channels
Definition: aptx.h:33
limits.h
REFRESH_RATE
#define REFRESH_RATE
Definition: ffplay.c:101
FrameQueue::rindex_shown
int rindex_shown
Definition: ffplay.c:177
nb_streams
static int nb_streams
Definition: ffprobe.c:282
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
VideoState::force_refresh
int force_refresh
Definition: ffplay.c:208
get_clock
static double get_clock(Clock *c)
Definition: ffplay.c:1378
screen_top
static int screen_top
Definition: ffplay.c:319
VideoState::audio_diff_avg_count
int audio_diff_avg_count
Definition: ffplay.c:241
EXTERNAL_CLOCK_SPEED_MIN
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:93
PacketQueue::last_pkt
MyAVPacketList * last_pkt
Definition: ffplay.c:120
sws_flags
static unsigned sws_flags
Definition: ffplay.c:111
parse_options
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:380
AV_PIX_FMT_YUV420P
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
renderer
static SDL_Renderer * renderer
Definition: ffplay.c:369
av_usleep
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
SwrContext
The libswresample context.
Definition: swresample_internal.h:95
vp_duration
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1559
swr_alloc_set_opts
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
NAN
#define NAN
Definition: mathematics.h:64
f
#define f(width, name)
Definition: cbs_vp9.c:255
AV_PIX_FMT_BGR32_1
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:373
VideoState::step
int step
Definition: ffplay.c:294
av_rdft_calc
void av_rdft_calc(RDFTContext *s, FFTSample *data)
synchronize_audio
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2297
AVCodecContext::codec_id
enum AVCodecID codec_id
Definition: avcodec.h:536
arg
const char * arg
Definition: jacosubdec.c:66
if
if(ret)
Definition: filter_design.txt:179
window_title
static const char * window_title
Definition: ffplay.c:313
Clock::speed
double speed
Definition: ffplay.c:148
VideoState::SHOW_MODE_VIDEO
@ SHOW_MODE_VIDEO
Definition: ffplay.c:263
AVDISCARD_ALL
@ AVDISCARD_ALL
discard all
Definition: avcodec.h:236
AVFormatContext
Format I/O context.
Definition: avformat.h:1335
av_log_get_level
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:520
init_dynload
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
opts
AVDictionary * opts
Definition: movenc.c:50
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1012
AVSubtitleRect::w
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2662
seek_chapter
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3248
get_master_sync_type
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1426
avcodec_get_class
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:295
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1267
AVStream::time_base
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avformat.h:894
NULL
#define NULL
Definition: coverity.c:32
stream_open
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3076
FrameQueue::max_size
int max_size
Definition: ffplay.c:175
OPT_EXPERT
#define OPT_EXPERT
Definition: cmdutils.h:163
AV_PIX_FMT_YUYV422
@ AV_PIX_FMT_YUYV422
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:67
Decoder
Definition: ffplay.c:189
AudioParams::freq
int freq
Definition: ffplay.c:136
avcodec_free_context
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:172
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
isnan
#define isnan(x)
Definition: libm.h:340
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:842
stream_cycle_channel
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3134
VideoState::frame_drops_late
int frame_drops_late
Definition: ffplay.c:260
AV_CODEC_FLAG2_FAST
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:348
VideoState::rdft
RDFTContext * rdft
Definition: ffplay.c:268
AVFormatContext::pb
AVIOContext * pb
I/O context.
Definition: avformat.h:1377
av_log_set_flags
void av_log_set_flags(int arg)
Definition: log.c:445
parseutils.h
frame_queue_unref_item
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:688
FrameQueue::queue
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:171
VideoState::last_i_start
int last_i_start
Definition: ffplay.c:267
init_opts
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:82
Decoder::packet_pending
int packet_pending
Definition: ffplay.c:195
cursor_last_shown
static int64_t cursor_last_shown
Definition: ffplay.c:349
AVProgram::stream_index
unsigned int * stream_index
Definition: avformat.h:1261
frame_queue_peek
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:734
OPT_INT64
#define OPT_INT64
Definition: cmdutils.h:170
avfilter_inout_alloc
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:207
Frame::duration
double duration
Definition: ffplay.c:160
DFT_R2C
@ DFT_R2C
Definition: avfft.h:72
lowres
static int lowres
Definition: ffplay.c:336
AV_DICT_DONT_OVERWRITE
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:76
AV_PIX_FMT_RGB8
@ AV_PIX_FMT_RGB8
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:86
avcodec_open2
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:565
time.h
Frame::pos
int64_t pos
Definition: ffplay.c:161
FFTSample
float FFTSample
Definition: avfft.h:35
avfft.h
VideoState::frame_last_returned_time
double frame_last_returned_time
Definition: ffplay.c:282
set_clock_at
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1390
toggle_pause
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1503
TextureFormatEntry
Definition: ffplay.c:373
AVFilterGraph
Definition: avfilter.h:840
stream_component_open
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2575
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
av_buffersink_get_channel_layout
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
av_packet_ref
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:614
fp
#define fp
Definition: regdef.h:44
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:663
AV_PIX_FMT_NE
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:367
exp
int8_t exp
Definition: eval.c:72
VideoState::seek_req
int seek_req
Definition: ffplay.c:212
VideoState::SHOW_MODE_WAVES
@ SHOW_MODE_WAVES
Definition: ffplay.c:263
VideoState::audio_clock
double audio_clock
Definition: ffplay.c:236
VideoState::read_pause_return
int read_pause_return
Definition: ffplay.c:216
event_loop
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3276
swresample.h
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
VideoState::ytop
int ytop
Definition: ffplay.c:293
AVCodecParameters::sample_rate
int sample_rate
Audio only.
Definition: codec_par.h:170
av_get_channel_layout_nb_channels
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
Definition: channel_layout.c:220
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
VideoState::sample_array
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:265
AVFormatContext::nb_streams
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1391
exit_on_mousedown
static int exit_on_mousedown
Definition: ffplay.c:340
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
Decoder::next_pts_tb
AVRational next_pts_tb
Definition: ffplay.c:200
AVCodecContext::lowres
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:1765
VideoState::audioq
PacketQueue audioq
Definition: ffplay.c:243
codec_opts
AVDictionary * codec_opts
Definition: cmdutils.c:70
audio_callback_time
static int64_t audio_callback_time
Definition: ffplay.c:362
eval.h
avformat_find_stream_info
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3622
Frame::format
int format
Definition: ffplay.c:164
get_valid_channel_layout
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:420
AVCodecContext::flags2
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:613
AVFrame::pict_type
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
AV_PIX_FMT_RGB24
@ AV_PIX_FMT_RGB24
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:68
AVMediaType
AVMediaType
Definition: avutil.h:199
av_log_set_callback
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
AVPacket::size
int size
Definition: packet.h:356
avformat_match_stream_specifier
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:5329
VideoState::audio_write_buf_size
int audio_write_buf_size
Definition: ffplay.c:250
av_rdft_init
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
copy
static void copy(const float *p1, float *p2, const int length)
Definition: vf_vaguedenoiser.c:194
avformat_alloc_context
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:144
AVDISCARD_DEFAULT
@ AVDISCARD_DEFAULT
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
FrameQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:178
frame_queue_peek_writable
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:749
OPT_AUDIO
#define OPT_AUDIO
Definition: cmdutils.h:166
Frame::sub
AVSubtitle sub
Definition: ffplay.c:157
VideoState::last_audio_stream
int last_audio_stream
Definition: ffplay.c:305
Decoder::pkt
AVPacket pkt
Definition: ffplay.c:190
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:472
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
sp
#define sp
Definition: regdef.h:63
start_time
static int64_t start_time
Definition: ffplay.c:332
FFMAX
#define FFMAX(a, b)
Definition: common.h:94
VideoState::SHOW_MODE_NB
@ SHOW_MODE_NB
Definition: ffplay.c:263
AVCodecContext::sample_fmt
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
Frame::serial
int serial
Definition: ffplay.c:158
AVCodecContext::pkt_timebase
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:59
hypot
static av_const double hypot(double x, double y)
Definition: libm.h:366
uninit_opts
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents.
Definition: cmdutils.c:87
size
int size
Definition: twinvq_data.h:11134
VideoState::xpos
int xpos
Definition: ffplay.c:271
av_make_q
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
read_thread
static int read_thread(void *arg)
Definition: ffplay.c:2757
AV_PIX_FMT_BGR555
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:390
avformat_seek_file
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2550
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
Clock::paused
int paused
Definition: ffplay.c:150
rect::h
int h
Definition: f_ebur128.c:91
VideoState::sub_texture
SDL_Texture * sub_texture
Definition: ffplay.c:274
av_guess_frame_rate
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5145
swr_free
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
VideoState::vid_texture
SDL_Texture * vid_texture
Definition: ffplay.c:275
setup_find_stream_info_opts
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2161
printf
printf("static const uint8_t my_array[100] = {\n")
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:373
VideoState::sample_array_index
int sample_array_index
Definition: ffplay.c:266
wanted_stream_spec
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:323
infinite_buffer
static int infinite_buffer
Definition: ffplay.c:343
VideoState::max_frame_duration
double max_frame_duration
Definition: ffplay.c:287
avdevice.h
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:558
packet_queue_destroy
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:517
AVPacket::dts
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:354
height
#define height
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:370
VideoState::frame_drops_early
int frame_drops_early
Definition: ffplay.c:259
a
The reader does not expect b to be semantically here and if the code is changed by maybe adding a a division or other the signedness will almost certainly be mistaken To avoid this confusion a new type was SUINT is the C unsigned type but it holds a signed int to use the same example SUINT a
Definition: undefined.txt:41
FFMIN
#define FFMIN(a, b)
Definition: common.h:96
AVFrame::channel_layout
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:477
toggle_mute
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1509
av_format_inject_global_side_data
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:149
decoder_abort
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:822
video_refresh
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1578
ns
#define ns(max_value, name, subs,...)
Definition: cbs_av1.c:686
seek_interval
static float seek_interval
Definition: ffplay.c:325
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:203
rect::x
int x
Definition: f_ebur128.c:91
VideoState::seek_pos
int64_t seek_pos
Definition: ffplay.c:214
frame_queue_push
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:781
audio_dev
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:371
sigterm_handler
static void sigterm_handler(int sig)
Definition: ffplay.c:1323
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
sample_rates
sample_rates
Definition: ffmpeg_filter.c:192
packet_queue_abort
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:524
video_codec_name
static const char * video_codec_name
Definition: ffplay.c:347
AVCodecContext::channels
int channels
number of audio channels
Definition: avcodec.h:1187
buffersink.h
AVCodec::id
enum AVCodecID id
Definition: codec.h:204
show_banner
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1184
packet_queue_flush
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:499
packet_queue_get
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:544
av_get_picture_type_char
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
MAX_QUEUE_SIZE
#define MAX_QUEUE_SIZE
Definition: ffplay.c:67
AVCOL_SPC_SMPTE240M
@ AVCOL_SPC_SMPTE240M
functionally identical to above
Definition: pixfmt.h:517
avcodec_get_name
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1206
MIN_FRAMES
#define MIN_FRAMES
Definition: ffplay.c:68
VideoState::queue_attachments_req
int queue_attachments_req
Definition: ffplay.c:211
in
Definition: audio_convert.c:326
avcodec_send_packet
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:586
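
A minimal usage sketch (not an excerpt from ffplay.c) of the send/receive decoding API around avcodec_send_packet() and avcodec_receive_frame(); decode_packet and its parameters are illustrative names for an opened decoder context, a demuxed packet and an allocated AVFrame.

#include <libavcodec/avcodec.h>

static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    /* Feed one compressed packet to the decoder. */
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    /* Drain every frame the decoder can produce from the data sent so far. */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;            /* needs more input, or fully flushed */
        if (ret < 0)
            return ret;          /* real decoding error */
        /* ... consume the frame here ... */
        av_frame_unref(frame);
    }
    return 0;
}
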
av_find_input_format
ff_const59 AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:118
FrameQueue::windex
int windex
Definition: ffplay.c:173
VideoState::filename
char * filename
Definition: ffplay.c:292
VideoState::muted
int muted
Definition: ffplay.c:252
Decoder::start_pts
int64_t start_pts
Definition: ffplay.c:197
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:366
bprint.h
Clock::pts
double pts
Definition: ffplay.c:145
VIDEO_PICTURE_QUEUE_SIZE
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:130
PacketQueue::serial
int serial
Definition: ffplay.c:125
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
AVPacket::pts
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
VideoState::show_mode
enum VideoState::ShowMode show_mode
VideoState::audio_src
struct AudioParams audio_src
Definition: ffplay.c:253
program_birth_year
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:65
avformat_open_input
int avformat_open_input(AVFormatContext **ps, const char *url, ff_const59 AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:536
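
A hedged open/close sketch for avformat_open_input(); open_media is an illustrative helper, and the caller supplies the URL.

#include <libavformat/avformat.h>

static int open_media(const char *url, AVFormatContext **out_ic)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, url, NULL, NULL); /* probe format, read header */
    if (ret < 0)
        return ret;
    ret = avformat_find_stream_info(ic, NULL);           /* fill stream codec parameters */
    if (ret < 0) {
        avformat_close_input(&ic);
        return ret;
    }
    *out_ic = ic;
    return 0;
}
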
VideoState::audio_buf1
uint8_t * audio_buf1
Definition: ffplay.c:246
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
avfilter_graph_parse_ptr
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:549
swr_opts
AVDictionary * swr_opts
Definition: cmdutils.c:69
compute_mod
static int compute_mod(int a, int b)
Definition: ffplay.c:1061
Decoder::start_pts_tb
AVRational start_pts_tb
Definition: ffplay.c:198
VideoState::rdft_data
FFTSample * rdft_data
Definition: ffplay.c:270
AVCodecParameters::height
int height
Definition: codec_par.h:127
avcodec_parameters_to_context
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2184
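
A sketch of the common pattern around avcodec_parameters_to_context(): copy a stream's codec parameters into a freshly allocated context before opening the decoder. The helper name and its arguments are illustrative, not code quoted from ffplay.c.

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static AVCodecContext *open_decoder_for_stream(AVFormatContext *ic, int stream_index)
{
    AVStream *st = ic->streams[stream_index];
    AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
    AVCodecContext *avctx = avcodec_alloc_context3(dec);

    if (!dec || !avctx)
        goto fail;
    /* width/height, sample format, extradata, ... are copied from codecpar */
    if (avcodec_parameters_to_context(avctx, st->codecpar) < 0)
        goto fail;
    if (avcodec_open2(avctx, dec, NULL) < 0)
        goto fail;
    return avctx;

fail:
    avcodec_free_context(&avctx);
    return NULL;
}
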
AV_TIME_BASE
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:347
file_iformat
static AVInputFormat * file_iformat
Definition: ffplay.c:311
av_malloc_array
#define av_malloc_array(a, b)
Definition: tableprint_vlc.h:32
video_open
static int video_open(VideoState *is)
Definition: ffplay.c:1340
RDFTContext
Definition: rdft.h:28
get_sdl_pix_fmt_and_blendmode
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:893
show_status
static int show_status
Definition: ffplay.c:330
opt_format
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3493
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
AV_PIX_FMT_RGB555
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:385
AV_OPT_FLAG_DECODING_PARAM
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:583
borderless
static int borderless
Definition: ffplay.c:327
uint8_t
uint8_t
Definition: audio_convert.c:194
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
update_sample_display
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2277
MyAVPacketList
Definition: ffplay.c:113
AV_SAMPLE_FMT_S16
@ AV_SAMPLE_FMT_S16
signed 16 bits
Definition: samplefmt.h:61
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
AVProgram
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1257
av_read_pause
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4328
len
int len
Definition: vorbis_enc_data.h:452
Frame::frame
AVFrame * frame
Definition: ffplay.c:156
AV_PIX_FMT_BGR565
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:389
av_samples_get_buffer_size
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
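
A small worked example for av_samples_get_buffer_size(); the wrapper and its numbers are arbitrary and only illustrate the call.

#include <libavutil/samplefmt.h>

static int interleaved_s16_size(int channels, int nb_samples)
{
    int linesize;
    /* align = 1 means no padding: e.g. 2 channels * 1024 samples * 2 bytes = 4096.
     * For interleaved formats linesize equals the total buffer size. */
    return av_samples_get_buffer_size(&linesize, channels, nb_samples,
                                      AV_SAMPLE_FMT_S16, 1);
}
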
AVFilterPad::name
const char * name
Pad name.
Definition: internal.h:60
PacketQueue::nb_packets
int nb_packets
Definition: ffplay.c:121
av_rescale
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
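
A worked example of av_rescale(): it computes a * b / c with 64-bit intermediates and rounding to nearest, which is how timestamps are converted between time bases without overflow. The helper below is illustrative.

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t usec_to_msec(int64_t ts_us)
{
    /* e.g. 1500000 us (AV_TIME_BASE units) -> 1500 ms */
    return av_rescale(ts_us, 1000, AV_TIME_BASE);
}
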
PacketQueue::first_pkt
MyAVPacketList * first_pkt
Definition: ffplay.c:120
FRAME_QUEUE_SIZE
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:133
frame_queue_peek_readable
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:765
AVIOInterruptCB::opaque
void * opaque
Definition: avio.h:60
AV_PIX_FMT_RGB565
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:384
parse_loglevel
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:503
av_cmp_q
static int av_cmp_q(AVRational a, AVRational b)
Compare two rationals.
Definition: rational.h:89
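
A quick illustration of av_cmp_q() semantics (0 if equal, 1 if a > b, -1 if a < b); the frame durations chosen here are just examples.

#include <libavutil/rational.h>

static int compare_frame_durations(void)
{
    AVRational a = { 1, 25 };   /* 0.040 s per frame */
    AVRational b = { 1, 30 };   /* 0.033 s per frame */
    return av_cmp_q(a, b);      /* 1, because 1/25 > 1/30 */
}
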
Clock::serial
int serial
Definition: ffplay.c:149
VideoState::height
int height
Definition: ffplay.c:293
AV_PIX_FMT_PAL8
@ AV_PIX_FMT_PAL8
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:77
AVStream::disposition
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:918
AVFMT_FLAG_GENPTS
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1467
VideoState::subpq
FrameQueue subpq
Definition: ffplay.c:225
seek_by_bytes
static int seek_by_bytes
Definition: ffplay.c:324
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:865
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
stream_seek
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1476
EXTERNAL_CLOCK_MAX_FRAMES
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:70
AVSubtitleRect::h
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2663
AV_SYNC_VIDEO_MASTER
@ AV_SYNC_VIDEO_MASTER
Definition: ffplay.c:185
avcodec_flush_buffers
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1080
frame
Definition: filter_design.txt:264
AV_PIX_FMT_0RGB32
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:374
video_stream
static AVStream * video_stream
Definition: demuxing_decoding.c:41
audio_open
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2501
filter_nbthreads
static int filter_nbthreads
Definition: ffplay.c:358
avcodec_find_decoder
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:919
log_callback_help
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:96
cursor_hidden
static int cursor_hidden
Definition: ffplay.c:350
VideoState::SHOW_MODE_RDFT
@ SHOW_MODE_RDFT
Definition: ffplay.c:263
find_stream_info
static int find_stream_info
Definition: ffplay.c:357
packet_queue_put_private
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:428
pos
unsigned int pos
Definition: spdifenc.c:412
VideoState::audio_buf_index
int audio_buf_index
Definition: ffplay.c:249
avformat.h
iformat
static AVInputFormat * iformat
Definition: ffprobe.c:259
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
dict.h
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:388
VideoState::last_paused
int last_paused
Definition: ffplay.c:210
AV_LOG_SKIP_REPEATED
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:366
AV_PIX_FMT_UYVY422
@ AV_PIX_FMT_UYVY422
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:81
AV_DICT_MATCH_CASE
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen_template.c:38
rdftspeed
double rdftspeed
Definition: ffplay.c:348
MyAVPacketList::serial
int serial
Definition: ffplay.c:116
opt_width
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3481
main
int main(int argc, char **argv)
Definition: ffplay.c:3681
avformat_network_deinit
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5071
show_usage
static void show_usage(void)
Definition: ffplay.c:3638
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodecContext
main external API structure.
Definition: avcodec.h:526
AVFrame::height
int height
Definition: frame.h:358
CMDUTILS_COMMON_OPTIONS
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:212
PacketQueue::mutex
SDL_mutex * mutex
Definition: ffplay.c:126
packet_queue_start
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:535
VideoState::vidclk
Clock vidclk
Definition: ffplay.c:221
audio_codec_name
static const char * audio_codec_name
Definition: ffplay.c:345
get_rotation
double get_rotation(AVStream *st)
Definition: cmdutils.c:2200
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
AV_SYNC_FRAMEDUP_THRESHOLD
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:85
show_mode
static enum ShowMode show_mode
Definition: ffplay.c:344
pkt
static AVPacket pkt
Definition: demuxing_decoding.c:54
PacketQueue::cond
SDL_cond * cond
Definition: ffplay.c:127
sws_freeContext
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2319
PacketQueue::size
int size
Definition: ffplay.c:122
options
static const OptionDef options[]
Definition: ffplay.c:3582
AVInputFormat::flags
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS,...
Definition: avformat.h:655
AudioParams::channels
int channels
Definition: ffplay.c:137
AVRational::den
int den
Denominator.
Definition: rational.h:60
mode
mode
Definition: ebur128.h:83
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
VideoState::subtitle_stream
int subtitle_stream
Definition: ffplay.c:277
avfilter.h
VideoState::abort_request
int abort_request
Definition: ffplay.c:207
fill_rectangle
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:831
flush_pkt
static AVPacket flush_pkt
Definition: ffplay.c:364
VideoState::audio_buf1_size
unsigned int audio_buf1_size
Definition: ffplay.c:248
VideoState::eof
int eof
Definition: ffplay.c:290
AVFMT_NOGENSEARCH
#define AVFMT_NOGENSEARCH
Format does not allow falling back to generic search.
Definition: avformat.h:469
AV_SYNC_THRESHOLD_MAX
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:83
av_buffersink_get_channels
int av_buffersink_get_channels(const AVFilterContext *ctx)
decoder_destroy
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:683
av_get_packed_sample_fmt
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
VideoState::read_tid
SDL_Thread * read_tid
Definition: ffplay.c:205
VideoState::audio_volume
int audio_volume
Definition: ffplay.c:251
VideoState::subdec
Decoder subdec
Definition: ffplay.c:230
AVIOContext::eof_reached
int eof_reached
true if it was unable to read due to an error or EOF
Definition: avio.h:239
stream_has_enough_packets
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2733
samples
The word “frame” indicates either a video frame or a group of audio samples
Definition: filter_design.txt:8
AVFormatContext::duration
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1450
av_mul_q
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVPacket::stream_index
int stream_index
Definition: packet.h:357
GROW_ARRAY
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:619
SUBPICTURE_QUEUE_SIZE
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:131
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
input_filename
static const char * input_filename
Definition: ffplay.c:312
stream_toggle_pause
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1490
VideoState::continue_read_thread
SDL_cond * continue_read_thread
Definition: ffplay.c:307
av_dict_set_int
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it.
Definition: dict.c:147
toggle_audio_display
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3219
AVCodecContext::codec_type
enum AVMediaType codec_type
Definition: avcodec.h:534
opt_default
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions.
Definition: cmdutils.c:542
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:253
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
AVFMT_TS_DISCONT
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
get_video_frame
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1778
av_get_default_channel_layout
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
Definition: channel_layout.c:225
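
Sketch: deriving a channel layout mask when only a channel count is known, a common fallback when the decoder does not report one; the wrapper name is illustrative.

#include <libavutil/channel_layout.h>

static int64_t guess_channel_layout(int nb_channels)
{
    /* e.g. 1 -> AV_CH_LAYOUT_MONO, 2 -> AV_CH_LAYOUT_STEREO */
    return av_get_default_channel_layout(nb_channels);
}
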
default_width
static int default_width
Definition: ffplay.c:314
AVIOInterruptCB::callback
int(* callback)(void *)
Definition: avio.h:59
VideoState::realtime
int realtime
Definition: ffplay.c:218
VideoState::sub_convert_ctx
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:289
diff
static av_always_inline int diff(const uint32_t a, const uint32_t b)
Definition: vf_palettegen.c:136
OPT_VIDEO
#define OPT_VIDEO
Definition: cmdutils.h:165
av_free
#define av_free(p)
Definition: tableprint_vlc.h:34
AVDictionaryEntry
Definition: dict.h:81
MyAVPacketList::pkt
AVPacket pkt
Definition: ffplay.c:114
AVPacket
This structure stores compressed data.
Definition: packet.h:332
audio_disable
static int audio_disable
Definition: ffplay.c:320
refresh_loop_wait_event
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3231
channel_layouts
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:35
av_dict_set
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
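
A hedged sketch of building an options dictionary with av_dict_set()/av_dict_set_int() and handing it to a libavformat call; the option names ("rtsp_transport", "probesize") are ordinary AVOption names chosen for illustration, and the URL is supplied by the caller.

#include <libavutil/dict.h>
#include <libavformat/avformat.h>

static int open_with_options(const char *url, AVFormatContext **ic)
{
    AVDictionary *opts = NULL;
    int ret;

    av_dict_set(&opts, "rtsp_transport", "tcp", 0);   /* string value */
    av_dict_set_int(&opts, "probesize", 5000000, 0);  /* integer convenience wrapper */

    ret = avformat_open_input(ic, url, NULL, &opts);
    /* options the demuxer did not consume are still present in opts */
    av_dict_free(&opts);
    return ret;
}
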
av_fast_malloc
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:502
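
A sketch of the grow-only buffer pattern av_fast_malloc() enables (the same idea ffplay applies to its resampled-audio buffer); ensure_buffer is an illustrative wrapper.

#include <stdint.h>
#include <libavutil/mem.h>

static uint8_t *ensure_buffer(uint8_t **buf, unsigned int *buf_size, size_t needed)
{
    /* Reallocates only when *buf_size < needed; on failure *buf is freed,
     * set to NULL and *buf_size is reset to 0. */
    av_fast_malloc(buf, buf_size, needed);
    return *buf;
}
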
stream_component_close
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1208
VideoState::subtitleq
PacketQueue subtitleq
Definition: ffplay.c:279
cmdutils.h
cmp_audio_fmts
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:409
OPT_BOOL
#define OPT_BOOL
Definition: cmdutils.h:162
Decoder::decoder_tid
SDL_Thread * decoder_tid
Definition: ffplay.c:201
parse_time_or_die
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds.
Definition: cmdutils.c:162
framedrop
static int framedrop
Definition: ffplay.c:342
VideoState::audio_stream
int audio_stream
Definition: ffplay.c:232
imgutils.h
VideoState::audio_buf_size
unsigned int audio_buf_size
Definition: ffplay.c:247
flags
#define flags(name, subs,...)
Definition: cbs_av1.c:565
av_rdft_end
void av_rdft_end(RDFTContext *s)
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
MyAVPacketList::next
struct MyAVPacketList * next
Definition: ffplay.c:115
block
Definition: filter_design.txt:207
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:28
AVFormatContext::start_time
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1440
PacketQueue::abort_request
int abort_request
Definition: ffplay.c:124
VideoState::ic
AVFormatContext * ic
Definition: ffplay.c:217
VideoState::viddec
Decoder viddec
Definition: ffplay.c:229
h
h
Definition: vp9dsp_template.c:2038
AVDictionaryEntry::value
char * value
Definition: dict.h:83
AVStream::start_time
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base.
Definition: avformat.h:904
VideoState::audio_buf
uint8_t * audio_buf
Definition: ffplay.c:245
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:843
avstring.h
frame_queue_peek_last
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:744
VideoState::last_vis_time
double last_vis_time
Definition: ffplay.c:272
show_help_options
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:174
decoder_reorder_pts
static int decoder_reorder_pts
Definition: ffplay.c:337
AVCOL_SPC_BT709
@ AVCOL_SPC_BT709
also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
Definition: pixfmt.h:511
VideoState::audio_tgt
struct AudioParams audio_tgt
Definition: ffplay.c:257
AVChapter::time_base
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1294
audio_stream
static AVStream * audio_stream
Definition: demuxing_decoding.c:41
int
int
Definition: ffmpeg_filter.c:192
SwsContext
Definition: swscale_internal.h:280
VideoState::audclk
Clock audclk
Definition: ffplay.c:220
avfilter_get_class
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1630
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
FrameQueue::pktq
PacketQueue * pktq
Definition: ffplay.c:180
short
Definition: writing_filters.txt:89
snprintf
#define snprintf
Definition: snprintf.h:34
video_audio_display
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1066
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
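
Two illustrative values for av_log2(), which returns the index of the highest set bit, i.e. floor(log2(v)) for v > 0.

#include <libavutil/common.h>

static void log2_examples(void)
{
    int a = av_log2(16);  /* 4 */
    int b = av_log2(15);  /* 3 */
    (void)a; (void)b;
}
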
AV_SYNC_THRESHOLD_MIN
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:81
buffersrc.h
AudioParams::bytes_per_sec
int bytes_per_sec
Definition: ffplay.c:141
check_external_clock_speed
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1461
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2696
SAMPLE_CORRECTION_PERCENT_MAX
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:90
EXTERNAL_CLOCK_SPEED_MAX
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:94
SWS_BICUBIC
#define SWS_BICUBIC
Definition: swscale.h:60
duration
static int64_t duration
Definition: ffplay.c:333
swscale.h
is_realtime
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2740
av_x_if_null
static void * av_x_if_null(const void *p, const void *x)
Return the default pointer x when p is NULL.
Definition: avutil.h:308
Frame::height
int height
Definition: ffplay.c:163
decode_interrupt_cb
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2727
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2465
VideoState::frame_timer
double frame_timer
Definition: ffplay.c:281
VideoState::audio_clock_serial
int audio_clock_serial
Definition: ffplay.c:237
av_init_packet
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
av_find_best_stream
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4251
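
A short sketch of av_find_best_stream() picking a video stream and its decoder in one call; pick_video_stream is an illustrative helper and ic is assumed to be an already opened AVFormatContext.

#include <libavformat/avformat.h>

static int pick_video_stream(AVFormatContext *ic, AVCodec **dec)
{
    /* wanted = -1 (any stream), related = -1 (none), flags = 0;
     * the matching decoder is returned through dec. */
    return av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, dec, 0);
}
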
avdevice_register_all
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
nb_channels
int nb_channels
Definition: channel_layout.c:76
avio_feof
int avio_feof(AVIOContext *s)
Similar to feof() but also returns nonzero on read errors.
Definition: aviobuf.c:356
realloc_texture
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:842
AV_PIX_FMT_RGB444
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:386
exit_on_keydown
static int exit_on_keydown
Definition: ffplay.c:339