ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate actual buffer size keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control */
77 #define SDL_VOLUME_STEP (SDL_MIX_MAXVOLUME / 50)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113     AVPacket pkt;
114     struct MyAVPacketList *next;
115     int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119     MyAVPacketList *first_pkt, *last_pkt;
120     int nb_packets;
121     int size;
122     int64_t duration;
123     int abort_request;
124     int serial;
125     SDL_mutex *mutex;
126     SDL_cond *cond;
127 } PacketQueue;
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135     int freq;
136     int channels;
137     int64_t channel_layout;
138     enum AVSampleFormat fmt;
139     int frame_size;
140     int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155     AVFrame *frame;
156     AVSubtitle sub;
157     int serial;
158     double pts;           /* presentation timestamp for the frame */
159     double duration;      /* estimated duration of the frame */
160     int64_t pos;          /* byte position of the frame in the input file */
161     SDL_Texture *bmp;
162     int allocated;
163     int width;
164     int height;
165     int format;
166     AVRational sar;
167     int uploaded;
168 } Frame;
169 
170 typedef struct FrameQueue {
171     Frame queue[FRAME_QUEUE_SIZE];
172     int rindex;
173     int windex;
174     int size;
175     int max_size;
176     int keep_last;
177     int rindex_shown;
178     SDL_mutex *mutex;
179     SDL_cond *cond;
180     PacketQueue *pktq;
181 } FrameQueue;
182 
183 enum {
184     AV_SYNC_AUDIO_MASTER, /* default choice */
185     AV_SYNC_VIDEO_MASTER,
186     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
187 };
188 
189 typedef struct Decoder {
190     AVPacket pkt;
191     AVPacket pkt_temp;
192     PacketQueue *queue;
193     AVCodecContext *avctx;
194     int pkt_serial;
195     int finished;
196     int packet_pending;
197     SDL_cond *empty_queue_cond;
198     int64_t start_pts;
199     AVRational start_pts_tb;
200     int64_t next_pts;
201     AVRational next_pts_tb;
202     SDL_Thread *decoder_tid;
203 } Decoder;
204 
205 typedef struct VideoState {
206     SDL_Thread *read_tid;
207     AVInputFormat *iformat;
208     int abort_request;
209     int force_refresh;
210     int paused;
211     int last_paused;
212     int queue_attachments_req;
213     int seek_req;
214     int seek_flags;
215     int64_t seek_pos;
216     int64_t seek_rel;
217     int read_pause_return;
218     AVFormatContext *ic;
219     int realtime;
220 
221     Clock audclk;
222     Clock vidclk;
223     Clock extclk;
224 
225     FrameQueue pictq;
226     FrameQueue subpq;
227     FrameQueue sampq;
228 
229     Decoder auddec;
230     Decoder viddec;
231     Decoder subdec;
232 
233     int audio_stream;
234 
235     int av_sync_type;
236 
237     double audio_clock;
238     int audio_clock_serial;
239     double audio_diff_cum; /* used for AV difference average computation */
240     double audio_diff_avg_coef;
241     double audio_diff_threshold;
242     int audio_diff_avg_count;
243     AVStream *audio_st;
244     PacketQueue audioq;
245     int audio_hw_buf_size;
246     uint8_t *audio_buf;
247     uint8_t *audio_buf1;
248     unsigned int audio_buf_size; /* in bytes */
249     unsigned int audio_buf1_size;
250     int audio_buf_index; /* in bytes */
251     int audio_write_buf_size;
252     int audio_volume;
253     int muted;
254     struct AudioParams audio_src;
255 #if CONFIG_AVFILTER
256     struct AudioParams audio_filter_src;
257 #endif
258     struct AudioParams audio_tgt;
259     struct SwrContext *swr_ctx;
260     int frame_drops_early;
261     int frame_drops_late;
262 
263     enum ShowMode {
264         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
265     } show_mode;
266     int16_t sample_array[SAMPLE_ARRAY_SIZE];
267     int sample_array_index;
268     int last_i_start;
269     RDFTContext *rdft;
270     int rdft_bits;
271     FFTSample *rdft_data;
272     int xpos;
273     double last_vis_time;
274     SDL_Texture *vis_texture;
275     SDL_Texture *sub_texture;
276 
277     int subtitle_stream;
278     AVStream *subtitle_st;
279     PacketQueue subtitleq;
280 
281     double frame_timer;
282     double frame_last_returned_time;
283     double frame_last_filter_delay;
284     int video_stream;
285     AVStream *video_st;
286     PacketQueue videoq;
287     double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
288     struct SwsContext *img_convert_ctx;
289     struct SwsContext *sub_convert_ctx;
290     int eof;
291 
292     char *filename;
293     int width, height, xleft, ytop;
294     int step;
295 
296 #if CONFIG_AVFILTER
297     int vfilter_idx;
298     AVFilterContext *in_video_filter; // the first filter in the video chain
299     AVFilterContext *out_video_filter; // the last filter in the video chain
300     AVFilterContext *in_audio_filter; // the first filter in the audio chain
301     AVFilterContext *out_audio_filter; // the last filter in the audio chain
302     AVFilterGraph *agraph; // audio filter graph
303 #endif
304 
305     int last_video_stream, last_audio_stream, last_subtitle_stream;
306 
307     SDL_cond *continue_read_thread;
308 } VideoState;
309 
310 /* options specified by the user */
311 static AVInputFormat *file_iformat;
312 static const char *input_filename;
313 static const char *window_title;
314 static int default_width = 640;
315 static int default_height = 480;
316 static int screen_width = 0;
317 static int screen_height = 0;
318 static int audio_disable;
319 static int video_disable;
320 static int subtitle_disable;
321 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
322 static int seek_by_bytes = -1;
323 static int display_disable;
324 static int show_status = 1;
325 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
326 static int64_t start_time = AV_NOPTS_VALUE;
327 static int64_t duration = AV_NOPTS_VALUE;
328 static int fast = 0;
329 static int genpts = 0;
330 static int lowres = 0;
331 static int decoder_reorder_pts = -1;
332 static int autoexit;
333 static int exit_on_keydown;
334 static int exit_on_mousedown;
335 static int loop = 1;
336 static int framedrop = -1;
337 static int infinite_buffer = -1;
338 static enum ShowMode show_mode = SHOW_MODE_NONE;
339 static const char *audio_codec_name;
340 static const char *subtitle_codec_name;
341 static const char *video_codec_name;
342 double rdftspeed = 0.02;
343 static int64_t cursor_last_shown;
344 static int cursor_hidden = 0;
345 #if CONFIG_AVFILTER
346 static const char **vfilters_list = NULL;
347 static int nb_vfilters = 0;
348 static char *afilters = NULL;
349 #endif
350 static int autorotate = 1;
351 
352 /* current context */
353 static int is_full_screen;
354 static int64_t audio_callback_time;
355 
356 static AVPacket flush_pkt;
357 
358 #define FF_ALLOC_EVENT (SDL_USEREVENT)
359 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
360 
361 static SDL_Window *window;
362 static SDL_Renderer *renderer;
363 
364 #if CONFIG_AVFILTER
365 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
366 {
367  GROW_ARRAY(vfilters_list, nb_vfilters);
368  vfilters_list[nb_vfilters - 1] = arg;
369  return 0;
370 }
371 #endif
372 
373 static inline
374 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
375  enum AVSampleFormat fmt2, int64_t channel_count2)
376 {
377  /* If channel count == 1, planar and non-planar formats are the same */
378  if (channel_count1 == 1 && channel_count2 == 1)
379     return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
380     else
381  return channel_count1 != channel_count2 || fmt1 != fmt2;
382 }
383 
384 static inline
385 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
386 {
387  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
388  return channel_layout;
389  else
390  return 0;
391 }
392 
393 static void free_picture(Frame *vp);
394 
395 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
396 {
397  MyAVPacketList *pkt1;
398 
399  if (q->abort_request)
400  return -1;
401 
402  pkt1 = av_malloc(sizeof(MyAVPacketList));
403  if (!pkt1)
404  return -1;
405  pkt1->pkt = *pkt;
406  pkt1->next = NULL;
407  if (pkt == &flush_pkt)
408  q->serial++;
409  pkt1->serial = q->serial;
410 
411  if (!q->last_pkt)
412  q->first_pkt = pkt1;
413  else
414  q->last_pkt->next = pkt1;
415  q->last_pkt = pkt1;
416  q->nb_packets++;
417  q->size += pkt1->pkt.size + sizeof(*pkt1);
418  q->duration += pkt1->pkt.duration;
419  /* XXX: should duplicate packet data in DV case */
420  SDL_CondSignal(q->cond);
421  return 0;
422 }
423 
424 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
425 {
426  int ret;
427 
428  SDL_LockMutex(q->mutex);
429  ret = packet_queue_put_private(q, pkt);
430  SDL_UnlockMutex(q->mutex);
431 
432  if (pkt != &flush_pkt && ret < 0)
433  av_packet_unref(pkt);
434 
435  return ret;
436 }
437 
438 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
439 {
440  AVPacket pkt1, *pkt = &pkt1;
441  av_init_packet(pkt);
442  pkt->data = NULL;
443  pkt->size = 0;
444  pkt->stream_index = stream_index;
445  return packet_queue_put(q, pkt);
446 }
447 
448 /* packet queue handling */
449 static int packet_queue_init(PacketQueue *q)
450 {
451  memset(q, 0, sizeof(PacketQueue));
452  q->mutex = SDL_CreateMutex();
453  if (!q->mutex) {
454  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
455  return AVERROR(ENOMEM);
456  }
457  q->cond = SDL_CreateCond();
458  if (!q->cond) {
459  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
460  return AVERROR(ENOMEM);
461  }
462  q->abort_request = 1;
463  return 0;
464 }
465 
466 static void packet_queue_flush(PacketQueue *q)
467 {
468  MyAVPacketList *pkt, *pkt1;
469 
470  SDL_LockMutex(q->mutex);
471  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
472  pkt1 = pkt->next;
473  av_packet_unref(&pkt->pkt);
474  av_freep(&pkt);
475  }
476  q->last_pkt = NULL;
477  q->first_pkt = NULL;
478  q->nb_packets = 0;
479  q->size = 0;
480  q->duration = 0;
481  SDL_UnlockMutex(q->mutex);
482 }
483 
484 static void packet_queue_destroy(PacketQueue *q)
485 {
486     packet_queue_flush(q);
487     SDL_DestroyMutex(q->mutex);
488  SDL_DestroyCond(q->cond);
489 }
490 
491 static void packet_queue_abort(PacketQueue *q)
492 {
493  SDL_LockMutex(q->mutex);
494 
495  q->abort_request = 1;
496 
497  SDL_CondSignal(q->cond);
498 
499  SDL_UnlockMutex(q->mutex);
500 }
501 
502 static void packet_queue_start(PacketQueue *q)
503 {
504  SDL_LockMutex(q->mutex);
505  q->abort_request = 0;
506  packet_queue_put_private(q, &flush_pkt);
507  SDL_UnlockMutex(q->mutex);
508 }
509 
510 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
511 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
512 {
513  MyAVPacketList *pkt1;
514  int ret;
515 
516  SDL_LockMutex(q->mutex);
517 
518  for (;;) {
519  if (q->abort_request) {
520  ret = -1;
521  break;
522  }
523 
524  pkt1 = q->first_pkt;
525  if (pkt1) {
526  q->first_pkt = pkt1->next;
527  if (!q->first_pkt)
528  q->last_pkt = NULL;
529  q->nb_packets--;
530  q->size -= pkt1->pkt.size + sizeof(*pkt1);
531  q->duration -= pkt1->pkt.duration;
532  *pkt = pkt1->pkt;
533  if (serial)
534  *serial = pkt1->serial;
535  av_free(pkt1);
536  ret = 1;
537  break;
538  } else if (!block) {
539  ret = 0;
540  break;
541  } else {
542  SDL_CondWait(q->cond, q->mutex);
543  }
544  }
545  SDL_UnlockMutex(q->mutex);
546  return ret;
547 }
548 
549 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
550  memset(d, 0, sizeof(Decoder));
551  d->avctx = avctx;
552  d->queue = queue;
553  d->empty_queue_cond = empty_queue_cond;
554     d->start_pts = AV_NOPTS_VALUE;
555 }
556 
557 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
558     int got_frame = 0;
559 
560  do {
561  int ret = -1;
562 
563  if (d->queue->abort_request)
564  return -1;
565 
566  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
567  AVPacket pkt;
568  do {
569  if (d->queue->nb_packets == 0)
570  SDL_CondSignal(d->empty_queue_cond);
571  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
572  return -1;
573  if (pkt.data == flush_pkt.data) {
574                 avcodec_flush_buffers(d->avctx);
575                 d->finished = 0;
576  d->next_pts = d->start_pts;
577  d->next_pts_tb = d->start_pts_tb;
578  }
579  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
580  av_packet_unref(&d->pkt);
581  d->pkt_temp = d->pkt = pkt;
582  d->packet_pending = 1;
583  }
584 
585  switch (d->avctx->codec_type) {
586  case AVMEDIA_TYPE_VIDEO:
587  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
588  if (got_frame) {
589  if (decoder_reorder_pts == -1) {
590  frame->pts = av_frame_get_best_effort_timestamp(frame);
591  } else if (!decoder_reorder_pts) {
592  frame->pts = frame->pkt_dts;
593  }
594  }
595  break;
596  case AVMEDIA_TYPE_AUDIO:
597  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
598  if (got_frame) {
599  AVRational tb = (AVRational){1, frame->sample_rate};
600  if (frame->pts != AV_NOPTS_VALUE)
601  frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
602  else if (d->next_pts != AV_NOPTS_VALUE)
603  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
604  if (frame->pts != AV_NOPTS_VALUE) {
605  d->next_pts = frame->pts + frame->nb_samples;
606  d->next_pts_tb = tb;
607  }
608  }
609  break;
610         case AVMEDIA_TYPE_SUBTITLE:
611             ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
612  break;
613  }
614 
615  if (ret < 0) {
616  d->packet_pending = 0;
617  } else {
618             d->pkt_temp.dts =
619             d->pkt_temp.pts = AV_NOPTS_VALUE;
620             if (d->pkt_temp.data) {
621                 if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
622                     ret = d->pkt_temp.size;
623  d->pkt_temp.data += ret;
624  d->pkt_temp.size -= ret;
625  if (d->pkt_temp.size <= 0)
626  d->packet_pending = 0;
627  } else {
628  if (!got_frame) {
629  d->packet_pending = 0;
630  d->finished = d->pkt_serial;
631  }
632  }
633  }
634  } while (!got_frame && !d->finished);
635 
636  return got_frame;
637 }
638 
639 static void decoder_destroy(Decoder *d) {
640     av_packet_unref(&d->pkt);
641     avcodec_free_context(&d->avctx);
642 }
643 
644 static void frame_queue_unref_item(Frame *vp)
645 {
646  av_frame_unref(vp->frame);
647  avsubtitle_free(&vp->sub);
648 }
649 
650 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
651 {
652  int i;
653  memset(f, 0, sizeof(FrameQueue));
654  if (!(f->mutex = SDL_CreateMutex())) {
655  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
656  return AVERROR(ENOMEM);
657  }
658  if (!(f->cond = SDL_CreateCond())) {
659  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
660  return AVERROR(ENOMEM);
661  }
662  f->pktq = pktq;
663  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
664  f->keep_last = !!keep_last;
665  for (i = 0; i < f->max_size; i++)
666  if (!(f->queue[i].frame = av_frame_alloc()))
667  return AVERROR(ENOMEM);
668  return 0;
669 }
670 
671 static void frame_queue_destory(FrameQueue *f)
672 {
673  int i;
674  for (i = 0; i < f->max_size; i++) {
675  Frame *vp = &f->queue[i];
676         frame_queue_unref_item(vp);
677         av_frame_free(&vp->frame);
678  free_picture(vp);
679  }
680  SDL_DestroyMutex(f->mutex);
681  SDL_DestroyCond(f->cond);
682 }
683 
684 static void frame_queue_signal(FrameQueue *f)
685 {
686  SDL_LockMutex(f->mutex);
687  SDL_CondSignal(f->cond);
688  SDL_UnlockMutex(f->mutex);
689 }
690 
691 static Frame *frame_queue_peek(FrameQueue *f)
692 {
693  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
694 }
695 
696 static Frame *frame_queue_peek_next(FrameQueue *f)
697 {
698  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
699 }
700 
701 static Frame *frame_queue_peek_last(FrameQueue *f)
702 {
703  return &f->queue[f->rindex];
704 }
705 
706 static Frame *frame_queue_peek_writable(FrameQueue *f)
707 {
708  /* wait until we have space to put a new frame */
709  SDL_LockMutex(f->mutex);
710  while (f->size >= f->max_size &&
711  !f->pktq->abort_request) {
712  SDL_CondWait(f->cond, f->mutex);
713  }
714  SDL_UnlockMutex(f->mutex);
715 
716  if (f->pktq->abort_request)
717  return NULL;
718 
719  return &f->queue[f->windex];
720 }
721 
722 static Frame *frame_queue_peek_readable(FrameQueue *f)
723 {
724     /* wait until we have a readable new frame */
725  SDL_LockMutex(f->mutex);
726  while (f->size - f->rindex_shown <= 0 &&
727  !f->pktq->abort_request) {
728  SDL_CondWait(f->cond, f->mutex);
729  }
730  SDL_UnlockMutex(f->mutex);
731 
732  if (f->pktq->abort_request)
733  return NULL;
734 
735  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
736 }
737 
738 static void frame_queue_push(FrameQueue *f)
739 {
740  if (++f->windex == f->max_size)
741  f->windex = 0;
742  SDL_LockMutex(f->mutex);
743  f->size++;
744  SDL_CondSignal(f->cond);
745  SDL_UnlockMutex(f->mutex);
746 }
747 
748 static void frame_queue_next(FrameQueue *f)
749 {
750  if (f->keep_last && !f->rindex_shown) {
751  f->rindex_shown = 1;
752  return;
753  }
754     frame_queue_unref_item(&f->queue[f->rindex]);
755     if (++f->rindex == f->max_size)
756  f->rindex = 0;
757  SDL_LockMutex(f->mutex);
758  f->size--;
759  SDL_CondSignal(f->cond);
760  SDL_UnlockMutex(f->mutex);
761 }
762 
763 /* return the number of undisplayed frames in the queue */
764 static int frame_queue_nb_remaining(FrameQueue *f)
765 {
766  return f->size - f->rindex_shown;
767 }
768 
769 /* return last shown position */
770 static int64_t frame_queue_last_pos(FrameQueue *f)
771 {
772  Frame *fp = &f->queue[f->rindex];
773  if (f->rindex_shown && fp->serial == f->pktq->serial)
774  return fp->pos;
775  else
776  return -1;
777 }
778 
779 static void decoder_abort(Decoder *d, FrameQueue *fq)
780 {
781     packet_queue_abort(d->queue);
782     frame_queue_signal(fq);
783  SDL_WaitThread(d->decoder_tid, NULL);
784  d->decoder_tid = NULL;
785     packet_queue_flush(d->queue);
786 }
787 
788 static inline void fill_rectangle(int x, int y, int w, int h)
789 {
790  SDL_Rect rect;
791  rect.x = x;
792  rect.y = y;
793  rect.w = w;
794  rect.h = h;
795  if (w && h)
796  SDL_RenderFillRect(renderer, &rect);
797 }
798 
799 static void free_picture(Frame *vp)
800 {
801  if (vp->bmp) {
802  SDL_DestroyTexture(vp->bmp);
803  vp->bmp = NULL;
804  }
805 }
806 
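/* (Re)create the SDL texture when the requested size or pixel format no longer
 * matches the existing one; optionally zero-fill the newly created texture. */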
807 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
808 {
809  Uint32 format;
810  int access, w, h;
811  if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
812  void *pixels;
813  int pitch;
814  SDL_DestroyTexture(*texture);
815  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
816  return -1;
817  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
818  return -1;
819  if (init_texture) {
820  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
821  return -1;
822  memset(pixels, 0, pitch * new_height);
823  SDL_UnlockTexture(*texture);
824  }
825  }
826  return 0;
827 }
828 
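/* Compute the destination rectangle that fits the picture, with its sample
 * aspect ratio applied, centered and letterboxed inside the screen area. */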
829 static void calculate_display_rect(SDL_Rect *rect,
830  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
831  int pic_width, int pic_height, AVRational pic_sar)
832 {
833  float aspect_ratio;
834  int width, height, x, y;
835 
836  if (pic_sar.num == 0)
837  aspect_ratio = 0;
838  else
839  aspect_ratio = av_q2d(pic_sar);
840 
841  if (aspect_ratio <= 0.0)
842  aspect_ratio = 1.0;
843  aspect_ratio *= (float)pic_width / (float)pic_height;
844 
845  /* XXX: we suppose the screen has a 1.0 pixel ratio */
846  height = scr_height;
847  width = lrint(height * aspect_ratio) & ~1;
848  if (width > scr_width) {
849  width = scr_width;
850  height = lrint(width / aspect_ratio) & ~1;
851  }
852  x = (scr_width - width) / 2;
853  y = (scr_height - height) / 2;
854  rect->x = scr_xleft + x;
855  rect->y = scr_ytop + y;
856  rect->w = FFMAX(width, 1);
857  rect->h = FFMAX(height, 1);
858 }
859 
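/* Upload a decoded frame into the SDL texture: YUV420P and BGRA are copied
 * directly, any other pixel format is converted with swscale first. */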
860 static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
861  int ret = 0;
862  switch (frame->format) {
863  case AV_PIX_FMT_YUV420P:
864  ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
865  frame->data[1], frame->linesize[1],
866  frame->data[2], frame->linesize[2]);
867  break;
868  case AV_PIX_FMT_BGRA:
869  ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
870  break;
871  default:
872  /* This should only happen if we are not using avfilter... */
873  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
874  frame->width, frame->height, frame->format, frame->width, frame->height,
875             AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
876         if (*img_convert_ctx != NULL) {
877  uint8_t *pixels;
878  int pitch;
879  if (!SDL_LockTexture(tex, NULL, (void **)&pixels, &pitch)) {
880  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
881  0, frame->height, &pixels, &pitch);
882  SDL_UnlockTexture(tex);
883  }
884  } else {
885  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
886  ret = -1;
887  }
888  break;
889  }
890  return ret;
891 }
892 
893 static void video_image_display(VideoState *is)
894 {
895  Frame *vp;
896  Frame *sp = NULL;
897  SDL_Rect rect;
898 
899  vp = frame_queue_peek_last(&is->pictq);
900  if (vp->bmp) {
901  if (is->subtitle_st) {
902  if (frame_queue_nb_remaining(&is->subpq) > 0) {
903  sp = frame_queue_peek(&is->subpq);
904 
905  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
906  if (!sp->uploaded) {
907  uint8_t *pixels;
908  int pitch;
909  int i;
910  if (!sp->width || !sp->height) {
911  sp->width = vp->width;
912  sp->height = vp->height;
913  }
914  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
915  return;
916 
917  for (i = 0; i < sp->sub.num_rects; i++) {
918  AVSubtitleRect *sub_rect = sp->sub.rects[i];
919 
920  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
921  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
922  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
923  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
924 
925                 is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
926                     sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
927  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
928  0, NULL, NULL, NULL);
929  if (!is->sub_convert_ctx) {
930  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
931  return;
932  }
933  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
934  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
935  0, sub_rect->h, &pixels, &pitch);
936  SDL_UnlockTexture(is->sub_texture);
937  }
938  }
939  sp->uploaded = 1;
940  }
941  } else
942  sp = NULL;
943  }
944  }
945 
946  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
947 
948  if (!vp->uploaded) {
949  if (upload_texture(vp->bmp, vp->frame, &is->img_convert_ctx) < 0)
950  return;
951  vp->uploaded = 1;
952  }
953 
954  SDL_RenderCopy(renderer, vp->bmp, NULL, &rect);
955  if (sp) {
956 #if USE_ONEPASS_SUBTITLE_RENDER
957  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
958 #else
959  int i;
960  double xratio = (double)rect.w / (double)sp->width;
961  double yratio = (double)rect.h / (double)sp->height;
962  for (i = 0; i < sp->sub.num_rects; i++) {
963  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
964  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
965  .y = rect.y + sub_rect->y * yratio,
966  .w = sub_rect->w * xratio,
967  .h = sub_rect->h * yratio};
968  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
969  }
970 #endif
971  }
972  }
973 }
974 
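/* Positive modulo, used for indexing the circular sample array. */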
975 static inline int compute_mod(int a, int b)
976 {
977  return a < 0 ? a%b + b : a%b;
978 }
979 
980 static void video_audio_display(VideoState *s)
981 {
982  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
983  int ch, channels, h, h2;
984  int64_t time_diff;
985  int rdft_bits, nb_freq;
986 
987  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
988  ;
989  nb_freq = 1 << (rdft_bits - 1);
990 
991  /* compute display index : center on currently output samples */
992  channels = s->audio_tgt.channels;
993  nb_display_channels = channels;
994  if (!s->paused) {
995  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
996  n = 2 * channels;
997  delay = s->audio_write_buf_size;
998  delay /= n;
999 
1000  /* to be more precise, we take into account the time spent since
1001  the last buffer computation */
1002  if (audio_callback_time) {
1003  time_diff = av_gettime_relative() - audio_callback_time;
1004  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1005  }
1006 
1007  delay += 2 * data_used;
1008  if (delay < data_used)
1009  delay = data_used;
1010 
1011  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1012  if (s->show_mode == SHOW_MODE_WAVES) {
1013  h = INT_MIN;
1014  for (i = 0; i < 1000; i += channels) {
1015  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1016  int a = s->sample_array[idx];
1017  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1018  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1019  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1020  int score = a - d;
1021  if (h < score && (b ^ c) < 0) {
1022  h = score;
1023  i_start = idx;
1024  }
1025  }
1026  }
1027 
1028  s->last_i_start = i_start;
1029  } else {
1030  i_start = s->last_i_start;
1031  }
1032 
1033  if (s->show_mode == SHOW_MODE_WAVES) {
1034  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1035 
1036  /* total height for one channel */
1037  h = s->height / nb_display_channels;
1038  /* graph height / 2 */
1039  h2 = (h * 9) / 20;
1040  for (ch = 0; ch < nb_display_channels; ch++) {
1041  i = i_start + ch;
1042  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1043  for (x = 0; x < s->width; x++) {
1044  y = (s->sample_array[i] * h2) >> 15;
1045  if (y < 0) {
1046  y = -y;
1047  ys = y1 - y;
1048  } else {
1049  ys = y1;
1050  }
1051  fill_rectangle(s->xleft + x, ys, 1, y);
1052  i += channels;
1053  if (i >= SAMPLE_ARRAY_SIZE)
1054  i -= SAMPLE_ARRAY_SIZE;
1055  }
1056  }
1057 
1058  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1059 
1060  for (ch = 1; ch < nb_display_channels; ch++) {
1061  y = s->ytop + ch * h;
1062  fill_rectangle(s->xleft, y, s->width, 1);
1063  }
1064  } else {
1065  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1066  return;
1067 
1068  nb_display_channels= FFMIN(nb_display_channels, 2);
1069  if (rdft_bits != s->rdft_bits) {
1070  av_rdft_end(s->rdft);
1071  av_free(s->rdft_data);
1072  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1073  s->rdft_bits = rdft_bits;
1074  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1075  }
1076  if (!s->rdft || !s->rdft_data){
1077  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1078  s->show_mode = SHOW_MODE_WAVES;
1079  } else {
1080  FFTSample *data[2];
1081  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1082  uint32_t *pixels;
1083  int pitch;
1084  for (ch = 0; ch < nb_display_channels; ch++) {
1085  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1086  i = i_start + ch;
1087  for (x = 0; x < 2 * nb_freq; x++) {
1088  double w = (x-nb_freq) * (1.0 / nb_freq);
1089  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1090  i += channels;
1091  if (i >= SAMPLE_ARRAY_SIZE)
1092  i -= SAMPLE_ARRAY_SIZE;
1093  }
1094  av_rdft_calc(s->rdft, data[ch]);
1095  }
1096  /* Least efficient way to do this, we should of course
1097  * directly access it but it is more than fast enough. */
1098  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1099  pitch >>= 2;
1100  pixels += pitch * s->height;
1101  for (y = 0; y < s->height; y++) {
1102  double w = 1 / sqrt(nb_freq);
1103  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1104  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1105  : a;
1106  a = FFMIN(a, 255);
1107  b = FFMIN(b, 255);
1108  pixels -= pitch;
1109  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1110  }
1111  SDL_UnlockTexture(s->vis_texture);
1112  }
1113  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1114  }
1115  if (!s->paused)
1116  s->xpos++;
1117  if (s->xpos >= s->width)
1118  s->xpos= s->xleft;
1119  }
1120 }
1121 
1122 static void stream_component_close(VideoState *is, int stream_index)
1123 {
1124  AVFormatContext *ic = is->ic;
1125  AVCodecParameters *codecpar;
1126 
1127  if (stream_index < 0 || stream_index >= ic->nb_streams)
1128  return;
1129  codecpar = ic->streams[stream_index]->codecpar;
1130 
1131  switch (codecpar->codec_type) {
1132  case AVMEDIA_TYPE_AUDIO:
1133  decoder_abort(&is->auddec, &is->sampq);
1134  SDL_CloseAudio();
1135  decoder_destroy(&is->auddec);
1136  swr_free(&is->swr_ctx);
1137  av_freep(&is->audio_buf1);
1138  is->audio_buf1_size = 0;
1139  is->audio_buf = NULL;
1140 
1141  if (is->rdft) {
1142  av_rdft_end(is->rdft);
1143  av_freep(&is->rdft_data);
1144  is->rdft = NULL;
1145  is->rdft_bits = 0;
1146  }
1147  break;
1148  case AVMEDIA_TYPE_VIDEO:
1149  decoder_abort(&is->viddec, &is->pictq);
1150  decoder_destroy(&is->viddec);
1151  break;
1152  case AVMEDIA_TYPE_SUBTITLE:
1153  decoder_abort(&is->subdec, &is->subpq);
1154  decoder_destroy(&is->subdec);
1155  break;
1156  default:
1157  break;
1158  }
1159 
1160  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1161  switch (codecpar->codec_type) {
1162  case AVMEDIA_TYPE_AUDIO:
1163  is->audio_st = NULL;
1164  is->audio_stream = -1;
1165  break;
1166  case AVMEDIA_TYPE_VIDEO:
1167  is->video_st = NULL;
1168  is->video_stream = -1;
1169  break;
1170  case AVMEDIA_TYPE_SUBTITLE:
1171  is->subtitle_st = NULL;
1172  is->subtitle_stream = -1;
1173  break;
1174  default:
1175  break;
1176  }
1177 }
1178 
1179 static void stream_close(VideoState *is)
1180 {
1181  /* XXX: use a special url_shutdown call to abort parse cleanly */
1182  is->abort_request = 1;
1183  SDL_WaitThread(is->read_tid, NULL);
1184 
1185  /* close each stream */
1186  if (is->audio_stream >= 0)
1187         stream_component_close(is, is->audio_stream);
1188     if (is->video_stream >= 0)
1189         stream_component_close(is, is->video_stream);
1190     if (is->subtitle_stream >= 0)
1191         stream_component_close(is, is->subtitle_stream);
1192 
1193  avformat_close_input(&is->ic);
1194 
1195     packet_queue_destroy(&is->videoq);
1196     packet_queue_destroy(&is->audioq);
1197     packet_queue_destroy(&is->subtitleq);
1198 
1199  /* free all pictures */
1200  frame_queue_destory(&is->pictq);
1201  frame_queue_destory(&is->sampq);
1202  frame_queue_destory(&is->subpq);
1203  SDL_DestroyCond(is->continue_read_thread);
1204     sws_freeContext(is->img_convert_ctx);
1205     sws_freeContext(is->sub_convert_ctx);
1206     av_free(is->filename);
1207  if (is->vis_texture)
1208  SDL_DestroyTexture(is->vis_texture);
1209  if (is->sub_texture)
1210  SDL_DestroyTexture(is->sub_texture);
1211  av_free(is);
1212 }
1213 
1214 static void do_exit(VideoState *is)
1215 {
1216  if (is) {
1217  stream_close(is);
1218  }
1219  if (renderer)
1220  SDL_DestroyRenderer(renderer);
1221  if (window)
1222  SDL_DestroyWindow(window);
1223     av_lockmgr_register(NULL);
1224     uninit_opts();
1225 #if CONFIG_AVFILTER
1226  av_freep(&vfilters_list);
1227 #endif
1228     avformat_network_deinit();
1229     if (show_status)
1230  printf("\n");
1231  SDL_Quit();
1232  av_log(NULL, AV_LOG_QUIET, "%s", "");
1233  exit(0);
1234 }
1235 
1236 static void sigterm_handler(int sig)
1237 {
1238  exit(123);
1239 }
1240 
1241 static void set_default_window_size(int width, int height, AVRational sar)
1242 {
1243  SDL_Rect rect;
1244  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1245  default_width = rect.w;
1246  default_height = rect.h;
1247 }
1248 
1249 static int video_open(VideoState *is, Frame *vp)
1250 {
1251  int w,h;
1252 
1253  if (vp && vp->width)
1254  set_default_window_size(vp->width, vp->height, vp->sar);
1255 
1256  if (screen_width) {
1257  w = screen_width;
1258  h = screen_height;
1259  } else {
1260  w = default_width;
1261  h = default_height;
1262  }
1263 
1264  if (!window) {
1265  int flags = SDL_WINDOW_SHOWN | SDL_WINDOW_RESIZABLE;
1266  if (!window_title)
1267             window_title = input_filename;
1268         if (is_full_screen)
1269  flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
1270  window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
1271  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
1272  if (window) {
1273  SDL_RendererInfo info;
1274  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
1275  if (renderer) {
1276  if (!SDL_GetRendererInfo(renderer, &info))
1277  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
1278  }
1279  }
1280  } else {
1281  SDL_SetWindowSize(window, w, h);
1282  }
1283 
1284  if (!window || !renderer) {
1285  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1286  do_exit(is);
1287  }
1288 
1289  is->width = w;
1290  is->height = h;
1291 
1292  return 0;
1293 }
1294 
1295 /* display the current picture, if any */
1296 static void video_display(VideoState *is)
1297 {
1298  if (!window)
1299  video_open(is, NULL);
1300 
1301  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1302  SDL_RenderClear(renderer);
1303  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1304  video_audio_display(is);
1305  else if (is->video_st)
1306  video_image_display(is);
1307  SDL_RenderPresent(renderer);
1308 }
1309 
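/* Return the current clock value: the stored pts plus the real time elapsed
 * since the last update, scaled by the clock speed. Returns NAN when the
 * clock's serial no longer matches its packet queue (obsolete clock). */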
1310 static double get_clock(Clock *c)
1311 {
1312  if (*c->queue_serial != c->serial)
1313  return NAN;
1314  if (c->paused) {
1315  return c->pts;
1316  } else {
1317  double time = av_gettime_relative() / 1000000.0;
1318  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1319  }
1320 }
1321 
1322 static void set_clock_at(Clock *c, double pts, int serial, double time)
1323 {
1324  c->pts = pts;
1325  c->last_updated = time;
1326  c->pts_drift = c->pts - time;
1327  c->serial = serial;
1328 }
1329 
1330 static void set_clock(Clock *c, double pts, int serial)
1331 {
1332  double time = av_gettime_relative() / 1000000.0;
1333  set_clock_at(c, pts, serial, time);
1334 }
1335 
1336 static void set_clock_speed(Clock *c, double speed)
1337 {
1338  set_clock(c, get_clock(c), c->serial);
1339  c->speed = speed;
1340 }
1341 
1342 static void init_clock(Clock *c, int *queue_serial)
1343 {
1344  c->speed = 1.0;
1345  c->paused = 0;
1346  c->queue_serial = queue_serial;
1347  set_clock(c, NAN, -1);
1348 }
1349 
1350 static void sync_clock_to_slave(Clock *c, Clock *slave)
1351 {
1352  double clock = get_clock(c);
1353  double slave_clock = get_clock(slave);
1354  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1355  set_clock(c, slave_clock, slave->serial);
1356 }
1357 
1358 static int get_master_sync_type(VideoState *is) {
1359     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1360  if (is->video_st)
1361  return AV_SYNC_VIDEO_MASTER;
1362  else
1363  return AV_SYNC_AUDIO_MASTER;
1364  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1365  if (is->audio_st)
1366  return AV_SYNC_AUDIO_MASTER;
1367  else
1368  return AV_SYNC_EXTERNAL_CLOCK;
1369  } else {
1370  return AV_SYNC_EXTERNAL_CLOCK;
1371  }
1372 }
1373 
1374 /* get the current master clock value */
1375 static double get_master_clock(VideoState *is)
1376 {
1377  double val;
1378 
1379  switch (get_master_sync_type(is)) {
1380  case AV_SYNC_VIDEO_MASTER:
1381  val = get_clock(&is->vidclk);
1382  break;
1383  case AV_SYNC_AUDIO_MASTER:
1384  val = get_clock(&is->audclk);
1385  break;
1386  default:
1387  val = get_clock(&is->extclk);
1388  break;
1389  }
1390  return val;
1391 }
1392 
1393 static void check_external_clock_speed(VideoState *is) {
1394     if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1395         is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1396         set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1397     } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1398                (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1399         set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1400     } else {
1401  double speed = is->extclk.speed;
1402  if (speed != 1.0)
1403  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1404  }
1405 }
1406 
1407 /* seek in the stream */
1408 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1409 {
1410  if (!is->seek_req) {
1411  is->seek_pos = pos;
1412  is->seek_rel = rel;
1413  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1414  if (seek_by_bytes)
1415             is->seek_flags |= AVSEEK_FLAG_BYTE;
1416         is->seek_req = 1;
1417  SDL_CondSignal(is->continue_read_thread);
1418  }
1419 }
1420 
1421 /* pause or resume the video */
1422 static void stream_toggle_pause(VideoState *is)
1423 {
1424  if (is->paused) {
1425  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1426  if (is->read_pause_return != AVERROR(ENOSYS)) {
1427  is->vidclk.paused = 0;
1428  }
1429  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1430  }
1431  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1432  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1433 }
1434 
1435 static void toggle_pause(VideoState *is)
1436 {
1437  stream_toggle_pause(is);
1438  is->step = 0;
1439 }
1440 
1441 static void toggle_mute(VideoState *is)
1442 {
1443  is->muted = !is->muted;
1444 }
1445 
1446 static void update_volume(VideoState *is, int sign, int step)
1447 {
1448  is->audio_volume = av_clip(is->audio_volume + sign * step, 0, SDL_MIX_MAXVOLUME);
1449 }
1450 
1451 static void step_to_next_frame(VideoState *is)
1452 {
1453  /* if the stream is paused unpause it, then step */
1454  if (is->paused)
1455  stream_toggle_pause(is);
1456  is->step = 1;
1457 }
1458 
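/* Decide how long to wait before showing the next video frame: when video is
 * not the master clock, the nominal frame delay is shortened or stretched so
 * that the video clock converges towards the master clock. */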
1459 static double compute_target_delay(double delay, VideoState *is)
1460 {
1461  double sync_threshold, diff = 0;
1462 
1463  /* update delay to follow master synchronisation source */
1464     if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1465         /* if video is slave, we try to correct big delays by
1466  duplicating or deleting a frame */
1467  diff = get_clock(&is->vidclk) - get_master_clock(is);
1468 
1469  /* skip or repeat frame. We take into account the
1470  delay to compute the threshold. I still don't know
1471  if it is the best guess */
1472  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1473  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1474  if (diff <= -sync_threshold)
1475  delay = FFMAX(0, delay + diff);
1476  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1477  delay = delay + diff;
1478  else if (diff >= sync_threshold)
1479  delay = 2 * delay;
1480  }
1481  }
1482 
1483  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1484  delay, -diff);
1485 
1486  return delay;
1487 }
1488 
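/* Duration of a queued frame: the pts difference to the next frame when both
 * belong to the same serial, otherwise the frame's own stored duration. */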
1489 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1490  if (vp->serial == nextvp->serial) {
1491  double duration = nextvp->pts - vp->pts;
1492  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1493  return vp->duration;
1494  else
1495  return duration;
1496  } else {
1497  return 0.0;
1498  }
1499 }
1500 
1501 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1502  /* update current video pts */
1503  set_clock(&is->vidclk, pts, serial);
1504  sync_clock_to_slave(&is->extclk, &is->vidclk);
1505 }
1506 
1507 /* called to display each frame */
1508 static void video_refresh(void *opaque, double *remaining_time)
1509 {
1510  VideoState *is = opaque;
1511  double time;
1512 
1513  Frame *sp, *sp2;
1514 
1515  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1516         check_external_clock_speed(is);
1517 
1518  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1519  time = av_gettime_relative() / 1000000.0;
1520  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1521  video_display(is);
1522  is->last_vis_time = time;
1523  }
1524  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1525  }
1526 
1527  if (is->video_st) {
1528 retry:
1529  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1530  // nothing to do, no picture to display in the queue
1531  } else {
1532  double last_duration, duration, delay;
1533  Frame *vp, *lastvp;
1534 
1535  /* dequeue the picture */
1536  lastvp = frame_queue_peek_last(&is->pictq);
1537  vp = frame_queue_peek(&is->pictq);
1538 
1539  if (vp->serial != is->videoq.serial) {
1540  frame_queue_next(&is->pictq);
1541  goto retry;
1542  }
1543 
1544  if (lastvp->serial != vp->serial)
1545  is->frame_timer = av_gettime_relative() / 1000000.0;
1546 
1547  if (is->paused)
1548  goto display;
1549 
1550  /* compute nominal last_duration */
1551  last_duration = vp_duration(is, lastvp, vp);
1552  delay = compute_target_delay(last_duration, is);
1553 
1554  time= av_gettime_relative()/1000000.0;
1555  if (time < is->frame_timer + delay) {
1556  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1557  goto display;
1558  }
1559 
1560  is->frame_timer += delay;
1561  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1562  is->frame_timer = time;
1563 
1564  SDL_LockMutex(is->pictq.mutex);
1565  if (!isnan(vp->pts))
1566  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1567  SDL_UnlockMutex(is->pictq.mutex);
1568 
1569  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1570  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1571  duration = vp_duration(is, vp, nextvp);
1572  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1573  is->frame_drops_late++;
1574  frame_queue_next(&is->pictq);
1575  goto retry;
1576  }
1577  }
1578 
1579  if (is->subtitle_st) {
1580  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1581  sp = frame_queue_peek(&is->subpq);
1582 
1583  if (frame_queue_nb_remaining(&is->subpq) > 1)
1584  sp2 = frame_queue_peek_next(&is->subpq);
1585  else
1586  sp2 = NULL;
1587 
1588  if (sp->serial != is->subtitleq.serial
1589  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1590  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1591  {
1592  if (sp->uploaded) {
1593  int i;
1594  for (i = 0; i < sp->sub.num_rects; i++) {
1595  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1596  uint8_t *pixels;
1597  int pitch, j;
1598 
1599  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1600  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1601  memset(pixels, 0, sub_rect->w << 2);
1602  SDL_UnlockTexture(is->sub_texture);
1603  }
1604  }
1605  }
1606  frame_queue_next(&is->subpq);
1607  } else {
1608  break;
1609  }
1610  }
1611  }
1612 
1613  frame_queue_next(&is->pictq);
1614  is->force_refresh = 1;
1615 
1616  if (is->step && !is->paused)
1617  stream_toggle_pause(is);
1618  }
1619 display:
1620  /* display picture */
1621  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1622  video_display(is);
1623  }
1624  is->force_refresh = 0;
1625  if (show_status) {
1626  static int64_t last_time;
1627  int64_t cur_time;
1628  int aqsize, vqsize, sqsize;
1629  double av_diff;
1630 
1631  cur_time = av_gettime_relative();
1632  if (!last_time || (cur_time - last_time) >= 30000) {
1633  aqsize = 0;
1634  vqsize = 0;
1635  sqsize = 0;
1636  if (is->audio_st)
1637  aqsize = is->audioq.size;
1638  if (is->video_st)
1639  vqsize = is->videoq.size;
1640  if (is->subtitle_st)
1641  sqsize = is->subtitleq.size;
1642  av_diff = 0;
1643  if (is->audio_st && is->video_st)
1644  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1645  else if (is->video_st)
1646  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1647  else if (is->audio_st)
1648  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1649             av_log(NULL, AV_LOG_INFO,
1650                    "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1651  get_master_clock(is),
1652  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1653  av_diff,
1654                    is->frame_drops_early + is->frame_drops_late,
1655                    aqsize / 1024,
1656  vqsize / 1024,
1657  sqsize,
1658                    is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1659                    is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1660             fflush(stdout);
1661  last_time = cur_time;
1662  }
1663  }
1664 }
1665 
1666 /* allocate a picture (needs to be done in the main thread to avoid
1667    potential locking problems) */
1668 static void alloc_picture(VideoState *is)
1669 {
1670  Frame *vp;
1671  int sdl_format;
1672 
1673  vp = &is->pictq.queue[is->pictq.windex];
1674 
1675  video_open(is, vp);
1676 
1677  if (vp->format == AV_PIX_FMT_YUV420P)
1678  sdl_format = SDL_PIXELFORMAT_YV12;
1679  else
1680  sdl_format = SDL_PIXELFORMAT_ARGB8888;
1681 
1682  if (realloc_texture(&vp->bmp, sdl_format, vp->width, vp->height, SDL_BLENDMODE_NONE, 0) < 0) {
1683  /* SDL allocates a buffer smaller than requested if the video
1684  * overlay hardware is unable to support the requested size. */
1685         av_log(NULL, AV_LOG_FATAL,
1686                "Error: the video system does not support an image\n"
1687  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1688  "to reduce the image size.\n", vp->width, vp->height );
1689  do_exit(is);
1690  }
1691 
1692  SDL_LockMutex(is->pictq.mutex);
1693  vp->allocated = 1;
1694  SDL_CondSignal(is->pictq.cond);
1695  SDL_UnlockMutex(is->pictq.mutex);
1696 }
1697 
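/* Hand a decoded video frame over to the picture queue; texture (re)allocation
 * is delegated to the main thread via FF_ALLOC_EVENT. */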
1698 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1699 {
1700  Frame *vp;
1701 
1702 #if defined(DEBUG_SYNC)
1703  printf("frame_type=%c pts=%0.3f\n",
1704  av_get_picture_type_char(src_frame->pict_type), pts);
1705 #endif
1706 
1707  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1708  return -1;
1709 
1710  vp->sar = src_frame->sample_aspect_ratio;
1711  vp->uploaded = 0;
1712 
1713  /* alloc or resize hardware picture buffer */
1714  if (!vp->bmp || !vp->allocated ||
1715  vp->width != src_frame->width ||
1716  vp->height != src_frame->height ||
1717  vp->format != src_frame->format) {
1718  SDL_Event event;
1719 
1720  vp->allocated = 0;
1721  vp->width = src_frame->width;
1722  vp->height = src_frame->height;
1723  vp->format = src_frame->format;
1724 
1725  /* the allocation must be done in the main thread to avoid
1726  locking problems. */
1727  event.type = FF_ALLOC_EVENT;
1728  event.user.data1 = is;
1729  SDL_PushEvent(&event);
1730 
1731  /* wait until the picture is allocated */
1732  SDL_LockMutex(is->pictq.mutex);
1733  while (!vp->allocated && !is->videoq.abort_request) {
1734  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1735  }
1736  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1737  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, FF_ALLOC_EVENT, FF_ALLOC_EVENT) != 1) {
1738  while (!vp->allocated && !is->abort_request) {
1739  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1740  }
1741  }
1742  SDL_UnlockMutex(is->pictq.mutex);
1743 
1744  if (is->videoq.abort_request)
1745  return -1;
1746  }
1747 
1748  /* if the frame is not skipped, then display it */
1749  if (vp->bmp) {
1750  vp->pts = pts;
1751  vp->duration = duration;
1752  vp->pos = pos;
1753  vp->serial = serial;
1754 
1755  av_frame_move_ref(vp->frame, src_frame);
1756  frame_queue_push(&is->pictq);
1757  }
1758  return 0;
1759 }
1760 
1761 static int get_video_frame(VideoState *is, AVFrame *frame)
1762 {
1763  int got_picture;
1764 
1765  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1766  return -1;
1767 
1768  if (got_picture) {
1769  double dpts = NAN;
1770 
1771  if (frame->pts != AV_NOPTS_VALUE)
1772  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1773 
1774  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1775 
1776         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1777             if (frame->pts != AV_NOPTS_VALUE) {
1778  double diff = dpts - get_master_clock(is);
1779  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1780  diff - is->frame_last_filter_delay < 0 &&
1781  is->viddec.pkt_serial == is->vidclk.serial &&
1782  is->videoq.nb_packets) {
1783  is->frame_drops_early++;
1784  av_frame_unref(frame);
1785  got_picture = 0;
1786  }
1787  }
1788  }
1789  }
1790 
1791  return got_picture;
1792 }
1793 
1794 #if CONFIG_AVFILTER
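/* Link a source and a sink filter, optionally through a user-supplied
 * filtergraph description, and configure the resulting graph. */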
1795 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1796  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1797 {
1798  int ret, i;
1799  int nb_filters = graph->nb_filters;
1800     AVFilterInOut *outputs = NULL, *inputs = NULL;
1801 
1802  if (filtergraph) {
1803  outputs = avfilter_inout_alloc();
1804  inputs = avfilter_inout_alloc();
1805  if (!outputs || !inputs) {
1806  ret = AVERROR(ENOMEM);
1807  goto fail;
1808  }
1809 
1810  outputs->name = av_strdup("in");
1811  outputs->filter_ctx = source_ctx;
1812  outputs->pad_idx = 0;
1813  outputs->next = NULL;
1814 
1815  inputs->name = av_strdup("out");
1816  inputs->filter_ctx = sink_ctx;
1817  inputs->pad_idx = 0;
1818  inputs->next = NULL;
1819 
1820  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1821  goto fail;
1822  } else {
1823  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1824  goto fail;
1825  }
1826 
1827  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1828  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1829  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1830 
1831  ret = avfilter_graph_config(graph, NULL);
1832 fail:
1833  avfilter_inout_free(&outputs);
1834  avfilter_inout_free(&inputs);
1835  return ret;
1836 }
1837 
1838 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1839 {
1840     static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
1841     char sws_flags_str[512] = "";
1842  char buffersrc_args[256];
1843  int ret;
1844  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1845  AVCodecParameters *codecpar = is->video_st->codecpar;
1846  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1847  AVDictionaryEntry *e = NULL;
1848 
1849  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1850  if (!strcmp(e->key, "sws_flags")) {
1851  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1852  } else
1853  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1854  }
1855  if (strlen(sws_flags_str))
1856  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1857 
1858  graph->scale_sws_opts = av_strdup(sws_flags_str);
1859 
1860  snprintf(buffersrc_args, sizeof(buffersrc_args),
1861  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1862  frame->width, frame->height, frame->format,
1863              is->video_st->time_base.num, is->video_st->time_base.den,
1864              codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1865  if (fr.num && fr.den)
1866  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1867 
1868  if ((ret = avfilter_graph_create_filter(&filt_src,
1869  avfilter_get_by_name("buffer"),
1870  "ffplay_buffer", buffersrc_args, NULL,
1871  graph)) < 0)
1872  goto fail;
1873 
1874  ret = avfilter_graph_create_filter(&filt_out,
1875  avfilter_get_by_name("buffersink"),
1876  "ffplay_buffersink", NULL, NULL, graph);
1877  if (ret < 0)
1878  goto fail;
1879 
1880  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1881  goto fail;
1882 
1883  last_filter = filt_out;
1884 
1885 /* Note: this macro adds a filter before the lastly added filter, so the
1886  * processing order of the filters is in reverse */
1887 #define INSERT_FILT(name, arg) do { \
1888  AVFilterContext *filt_ctx; \
1889  \
1890  ret = avfilter_graph_create_filter(&filt_ctx, \
1891  avfilter_get_by_name(name), \
1892  "ffplay_" name, arg, NULL, graph); \
1893  if (ret < 0) \
1894  goto fail; \
1895  \
1896  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1897  if (ret < 0) \
1898  goto fail; \
1899  \
1900  last_filter = filt_ctx; \
1901 } while (0)
1902 
1903  if (autorotate) {
1904  double theta = get_rotation(is->video_st);
1905 
1906  if (fabs(theta - 90) < 1.0) {
1907  INSERT_FILT("transpose", "clock");
1908  } else if (fabs(theta - 180) < 1.0) {
1909  INSERT_FILT("hflip", NULL);
1910  INSERT_FILT("vflip", NULL);
1911  } else if (fabs(theta - 270) < 1.0) {
1912  INSERT_FILT("transpose", "cclock");
1913  } else if (fabs(theta) > 1.0) {
1914  char rotate_buf[64];
1915  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1916  INSERT_FILT("rotate", rotate_buf);
1917  }
1918  }
1919 
1920  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1921  goto fail;
1922 
1923  is->in_video_filter = filt_src;
1924  is->out_video_filter = filt_out;
1925 
1926 fail:
1927  return ret;
1928 }
1929 
1930 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1931 {
1933  int sample_rates[2] = { 0, -1 };
1934  int64_t channel_layouts[2] = { 0, -1 };
1935  int channels[2] = { 0, -1 };
1936  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1937  char aresample_swr_opts[512] = "";
1938  AVDictionaryEntry *e = NULL;
1939  char asrc_args[256];
1940  int ret;
1941 
1942  avfilter_graph_free(&is->agraph);
1943  if (!(is->agraph = avfilter_graph_alloc()))
1944  return AVERROR(ENOMEM);
1945 
1946  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1947  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1948  if (strlen(aresample_swr_opts))
1949  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1950  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1951 
1952  ret = snprintf(asrc_args, sizeof(asrc_args),
1953  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1954  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1955  is->audio_filter_src.channels,
1956  1, is->audio_filter_src.freq);
1957  if (is->audio_filter_src.channel_layout)
1958  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1959  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1960 
1961  ret = avfilter_graph_create_filter(&filt_asrc,
1962  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1963  asrc_args, NULL, is->agraph);
1964  if (ret < 0)
1965  goto end;
1966 
1967 
1968  ret = avfilter_graph_create_filter(&filt_asink,
1969  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1970  NULL, NULL, is->agraph);
1971  if (ret < 0)
1972  goto end;
1973 
1974  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1975  goto end;
1976  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1977  goto end;
1978 
1979  if (force_output_format) {
1980  channel_layouts[0] = is->audio_tgt.channel_layout;
1981  channels [0] = is->audio_tgt.channels;
1982  sample_rates [0] = is->audio_tgt.freq;
1983  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1984  goto end;
1985  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1986  goto end;
1987  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1988  goto end;
1989  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1990  goto end;
1991  }
1992 
1993 
1994  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1995  goto end;
1996 
1997  is->in_audio_filter = filt_asrc;
1998  is->out_audio_filter = filt_asink;
1999 
2000 end:
2001  if (ret < 0)
2002  avfilter_graph_free(&is->agraph);
2003  return ret;
2004 }
2005 #endif /* CONFIG_AVFILTER */
2006 
2007 static int audio_thread(void *arg)
2008 {
2009  VideoState *is = arg;
2010  AVFrame *frame = av_frame_alloc();
2011  Frame *af;
2012 #if CONFIG_AVFILTER
2013  int last_serial = -1;
2014  int64_t dec_channel_layout;
2015  int reconfigure;
2016 #endif
2017  int got_frame = 0;
2018  AVRational tb;
2019  int ret = 0;
2020 
2021  if (!frame)
2022  return AVERROR(ENOMEM);
2023 
2024  do {
2025  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2026  goto the_end;
2027 
2028  if (got_frame) {
2029  tb = (AVRational){1, frame->sample_rate};
2030 
2031 #if CONFIG_AVFILTER
2032  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2033 
2034  reconfigure =
2035  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2036  frame->format, av_frame_get_channels(frame)) ||
2037  is->audio_filter_src.channel_layout != dec_channel_layout ||
2038  is->audio_filter_src.freq != frame->sample_rate ||
2039  is->auddec.pkt_serial != last_serial;
2040 
2041  if (reconfigure) {
2042  char buf1[1024], buf2[1024];
2043  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2044  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2046  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2047  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2049 
2050  is->audio_filter_src.fmt = frame->format;
2051  is->audio_filter_src.channels = av_frame_get_channels(frame);
2052  is->audio_filter_src.channel_layout = dec_channel_layout;
2053  is->audio_filter_src.freq = frame->sample_rate;
2054  last_serial = is->auddec.pkt_serial;
2055 
2056  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2057  goto the_end;
2058  }
2059 
2060  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2061  goto the_end;
2062 
2063  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2064  tb = is->out_audio_filter->inputs[0]->time_base;
2065 #endif
2066  if (!(af = frame_queue_peek_writable(&is->sampq)))
2067  goto the_end;
2068 
2069  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2070  af->pos = av_frame_get_pkt_pos(frame);
2071  af->serial = is->auddec.pkt_serial;
2072  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2073 
2074  av_frame_move_ref(af->frame, frame);
2075  frame_queue_push(&is->sampq);
2076 
2077 #if CONFIG_AVFILTER
2078  if (is->audioq.serial != is->auddec.pkt_serial)
2079  break;
2080  }
2081  if (ret == AVERROR_EOF)
2082  is->auddec.finished = is->auddec.pkt_serial;
2083 #endif
2084  }
2085  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2086  the_end:
2087 #if CONFIG_AVFILTER
2088  avfilter_graph_free(&is->agraph);
2089 #endif
2090  av_frame_free(&frame);
2091  return ret;
2092 }
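/*
 * Note on audio_thread() above: with CONFIG_AVFILTER the decoded frame is pushed
 * into the "abuffer" source and every frame pulled from the "abuffersink" is
 * queued into is->sampq; the filter graph is rebuilt whenever the decoded
 * format, channel count/layout, sample rate or packet serial changes, so a seek
 * or a mid-stream format change reconfigures the chain without restarting the thread.
 */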
2093 
2094 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2095 {
2096  packet_queue_start(d->queue);
2097  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2098  if (!d->decoder_tid) {
2099  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2100  return AVERROR(ENOMEM);
2101  }
2102  return 0;
2103 }
2104 
2105 static int video_thread(void *arg)
2106 {
2107  VideoState *is = arg;
2108  AVFrame *frame = av_frame_alloc();
2109  double pts;
2110  double duration;
2111  int ret;
2112  AVRational tb = is->video_st->time_base;
2113  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2114 
2115 #if CONFIG_AVFILTER
2116  AVFilterGraph *graph = avfilter_graph_alloc();
2117  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2118  int last_w = 0;
2119  int last_h = 0;
2120  enum AVPixelFormat last_format = -2;
2121  int last_serial = -1;
2122  int last_vfilter_idx = 0;
2123  if (!graph) {
2124  av_frame_free(&frame);
2125  return AVERROR(ENOMEM);
2126  }
2127 
2128 #endif
2129 
2130  if (!frame) {
2131 #if CONFIG_AVFILTER
2132  avfilter_graph_free(&graph);
2133 #endif
2134  return AVERROR(ENOMEM);
2135  }
2136 
2137  for (;;) {
2138  ret = get_video_frame(is, frame);
2139  if (ret < 0)
2140  goto the_end;
2141  if (!ret)
2142  continue;
2143 
2144 #if CONFIG_AVFILTER
2145  if ( last_w != frame->width
2146  || last_h != frame->height
2147  || last_format != frame->format
2148  || last_serial != is->viddec.pkt_serial
2149  || last_vfilter_idx != is->vfilter_idx) {
2151  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2152  last_w, last_h,
2153  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2154  frame->width, frame->height,
2155  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2156  avfilter_graph_free(&graph);
2157  graph = avfilter_graph_alloc();
2158  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2159  SDL_Event event;
2160  event.type = FF_QUIT_EVENT;
2161  event.user.data1 = is;
2162  SDL_PushEvent(&event);
2163  goto the_end;
2164  }
2165  filt_in = is->in_video_filter;
2166  filt_out = is->out_video_filter;
2167  last_w = frame->width;
2168  last_h = frame->height;
2169  last_format = frame->format;
2170  last_serial = is->viddec.pkt_serial;
2171  last_vfilter_idx = is->vfilter_idx;
2172  frame_rate = filt_out->inputs[0]->frame_rate;
2173  }
2174 
2175  ret = av_buffersrc_add_frame(filt_in, frame);
2176  if (ret < 0)
2177  goto the_end;
2178 
2179  while (ret >= 0) {
2180  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2181 
2182  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2183  if (ret < 0) {
2184  if (ret == AVERROR_EOF)
2185  is->viddec.finished = is->viddec.pkt_serial;
2186  ret = 0;
2187  break;
2188  }
2189 
2190  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2191  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2192  is->frame_last_filter_delay = 0;
2193  tb = filt_out->inputs[0]->time_base;
2194 #endif
2195  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2196  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2197  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2198  av_frame_unref(frame);
2199 #if CONFIG_AVFILTER
2200  }
2201 #endif
2202 
2203  if (ret < 0)
2204  goto the_end;
2205  }
2206  the_end:
2207 #if CONFIG_AVFILTER
2208  avfilter_graph_free(&graph);
2209 #endif
2210  av_frame_free(&frame);
2211  return 0;
2212 }
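/*
 * Note on video_thread() above: the picture duration is taken as the inverse of
 * the frame rate reported by the filter output (or guessed from the stream when
 * filters are disabled), i.e. av_q2d((AVRational){frame_rate.den, frame_rate.num});
 * for a 25 fps stream that is 0.04 s per picture. Like the audio graph, the
 * video filter graph is rebuilt whenever the frame geometry, pixel format,
 * packet serial or selected vfilter index changes.
 */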
2213 
2214 static int subtitle_thread(void *arg)
2215 {
2216  VideoState *is = arg;
2217  Frame *sp;
2218  int got_subtitle;
2219  double pts;
2220 
2221  for (;;) {
2222  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2223  return 0;
2224 
2225  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2226  break;
2227 
2228  pts = 0;
2229 
2230  if (got_subtitle && sp->sub.format == 0) {
2231  if (sp->sub.pts != AV_NOPTS_VALUE)
2232  pts = sp->sub.pts / (double)AV_TIME_BASE;
2233  sp->pts = pts;
2234  sp->serial = is->subdec.pkt_serial;
2235  sp->width = is->subdec.avctx->width;
2236  sp->height = is->subdec.avctx->height;
2237  sp->uploaded = 0;
2238 
2239  /* now we can update the picture count */
2240  frame_queue_push(&is->subpq);
2241  } else if (got_subtitle) {
2242  avsubtitle_free(&sp->sub);
2243  }
2244  }
2245  return 0;
2246 }
2247 
2248 /* copy samples into the circular array used by the waveform/RDFT display */
2249 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2250 {
2251  int size, len;
2252 
2253  size = samples_size / sizeof(short);
2254  while (size > 0) {
2255  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2256  if (len > size)
2257  len = size;
2258  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2259  samples += len;
2260  is->sample_array_index += len;
2261  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2262  is->sample_array_index = 0;
2263  size -= len;
2264  }
2265 }
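/*
 * Note: is->sample_array acts as a circular buffer; the index wraps back to
 * zero once SAMPLE_ARRAY_SIZE shorts have been written, so the waveform/RDFT
 * display always reads the most recent samples handed to the audio callback.
 */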
2266 
2267 /* return the wanted number of samples to get better sync if sync_type is video
2268  * or external master clock */
2269 static int synchronize_audio(VideoState *is, int nb_samples)
2270 {
2271  int wanted_nb_samples = nb_samples;
2272 
2273  /* if not master, then we try to remove or add samples to correct the clock */
2274  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2275  double diff, avg_diff;
2276  int min_nb_samples, max_nb_samples;
2277 
2278  diff = get_clock(&is->audclk) - get_master_clock(is);
2279 
2280  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2281  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2282  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2283  /* not enough measures to have a correct estimate */
2284  is->audio_diff_avg_count++;
2285  } else {
2286  /* estimate the A-V difference */
2287  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2288 
2289  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2290  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2291  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2292  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2293  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2294  }
2295  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2296  diff, avg_diff, wanted_nb_samples - nb_samples,
2297  is->audio_clock, is->audio_diff_threshold);
2298  }
2299  } else {
2300  /* too big a difference: probably initial PTS errors, so
2301  reset the A-V filter */
2302  is->audio_diff_avg_count = 0;
2303  is->audio_diff_cum = 0;
2304  }
2305  }
2306 
2307  return wanted_nb_samples;
2308 }
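/*
 * Note on synchronize_audio() above: the A-V difference is smoothed with an
 * exponential average, audio_diff_cum = diff + coef * audio_diff_cum with
 * coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so measurements older than
 * AUDIO_DIFF_AVG_NB frames carry a combined weight of roughly 1%. No correction
 * is applied until that many frames have been seen, and the requested sample
 * count is clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the decoded
 * frame so the resulting speed adjustment stays small.
 */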
2309 
2310 /**
2311  * Decode one audio frame and return its uncompressed size.
2312  *
2313  * The processed audio frame is decoded, converted if required, and
2314  * stored in is->audio_buf, with size in bytes given by the return
2315  * value.
2316  */
2317 static int audio_decode_frame(VideoState *is)
2318 {
2319  int data_size, resampled_data_size;
2320  int64_t dec_channel_layout;
2321  av_unused double audio_clock0;
2322  int wanted_nb_samples;
2323  Frame *af;
2324 
2325  if (is->paused)
2326  return -1;
2327 
2328  do {
2329 #if defined(_WIN32)
2330  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2331  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2332  return -1;
2333  av_usleep (1000);
2334  }
2335 #endif
2336  if (!(af = frame_queue_peek_readable(&is->sampq)))
2337  return -1;
2338  frame_queue_next(&is->sampq);
2339  } while (af->serial != is->audioq.serial);
2340 
2341  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2342  af->frame->nb_samples,
2343  af->frame->format, 1);
2344 
2345  dec_channel_layout =
2346  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2347  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2348  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2349 
2350  if (af->frame->format != is->audio_src.fmt ||
2351  dec_channel_layout != is->audio_src.channel_layout ||
2352  af->frame->sample_rate != is->audio_src.freq ||
2353  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2354  swr_free(&is->swr_ctx);
2355  is->swr_ctx = swr_alloc_set_opts(NULL,
2356  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2357  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2358  0, NULL);
2359  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2361  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2364  swr_free(&is->swr_ctx);
2365  return -1;
2366  }
2367  is->audio_src.channel_layout = dec_channel_layout;
2368  is->audio_src.channels = av_frame_get_channels(af->frame);
2369  is->audio_src.freq = af->frame->sample_rate;
2370  is->audio_src.fmt = af->frame->format;
2371  }
2372 
2373  if (is->swr_ctx) {
2374  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2375  uint8_t **out = &is->audio_buf1;
2376  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2377  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2378  int len2;
2379  if (out_size < 0) {
2380  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2381  return -1;
2382  }
2383  if (wanted_nb_samples != af->frame->nb_samples) {
2384  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2385  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2386  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2387  return -1;
2388  }
2389  }
2390  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2391  if (!is->audio_buf1)
2392  return AVERROR(ENOMEM);
2393  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2394  if (len2 < 0) {
2395  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2396  return -1;
2397  }
2398  if (len2 == out_count) {
2399  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2400  if (swr_init(is->swr_ctx) < 0)
2401  swr_free(&is->swr_ctx);
2402  }
2403  is->audio_buf = is->audio_buf1;
2404  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2405  } else {
2406  is->audio_buf = af->frame->data[0];
2407  resampled_data_size = data_size;
2408  }
2409 
2410  audio_clock0 = is->audio_clock;
2411  /* update the audio clock with the pts */
2412  if (!isnan(af->pts))
2413  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2414  else
2415  is->audio_clock = NAN;
2416  is->audio_clock_serial = af->serial;
2417 #ifdef DEBUG
2418  {
2419  static double last_clock;
2420  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2421  is->audio_clock - last_clock,
2422  is->audio_clock, audio_clock0);
2423  last_clock = is->audio_clock;
2424  }
2425 #endif
2426  return resampled_data_size;
2427 }
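/*
 * Note on audio_decode_frame() above: the swresample context is (re)created only
 * when the incoming format, channel layout or sample rate differs from
 * is->audio_src, or when sample-count compensation is needed without a resampler;
 * the output buffer is sized for wanted_nb_samples scaled to the target rate plus
 * 256 samples of headroom, and swr_set_compensation() stretches or shrinks the
 * frame toward the wanted length so the audio clock converges on the master clock.
 */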
2428 
2429 /* prepare a new audio buffer */
2430 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2431 {
2432  VideoState *is = opaque;
2433  int audio_size, len1;
2434 
2435  audio_callback_time = av_gettime_relative();
2436 
2437  while (len > 0) {
2438  if (is->audio_buf_index >= is->audio_buf_size) {
2439  audio_size = audio_decode_frame(is);
2440  if (audio_size < 0) {
2441  /* if error, just output silence */
2442  is->audio_buf = NULL;
2443  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2444  } else {
2445  if (is->show_mode != SHOW_MODE_VIDEO)
2446  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2447  is->audio_buf_size = audio_size;
2448  }
2449  is->audio_buf_index = 0;
2450  }
2451  len1 = is->audio_buf_size - is->audio_buf_index;
2452  if (len1 > len)
2453  len1 = len;
2454  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2455  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2456  else {
2457  memset(stream, 0, len1);
2458  if (!is->muted && is->audio_buf)
2459  SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
2460  }
2461  len -= len1;
2462  stream += len1;
2463  is->audio_buf_index += len1;
2464  }
2465  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2466  /* Let's assume the audio driver that is used by SDL has two periods. */
2467  if (!isnan(is->audio_clock)) {
2468  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2469  sync_clock_to_slave(&is->extclk, &is->audclk);
2470  }
2471 }
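/*
 * Note on sdl_audio_callback() above: is->audio_clock holds the PTS of the end
 * of the last decoded frame, so the value pushed to audclk is set back by the
 * audio still queued ahead of the DAC: two hardware buffers plus the bytes not
 * yet copied out of audio_buf, divided by bytes_per_sec (assuming the SDL audio
 * driver really does use two periods, as the comment above states).
 */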
2472 
2473 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2474 {
2475  SDL_AudioSpec wanted_spec, spec;
2476  const char *env;
2477  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2478  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2479  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2480 
2481  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2482  if (env) {
2483  wanted_nb_channels = atoi(env);
2484  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2485  }
2486  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2487  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2488  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2489  }
2490  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2491  wanted_spec.channels = wanted_nb_channels;
2492  wanted_spec.freq = wanted_sample_rate;
2493  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2494  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2495  return -1;
2496  }
2497  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2498  next_sample_rate_idx--;
2499  wanted_spec.format = AUDIO_S16SYS;
2500  wanted_spec.silence = 0;
2501  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2502  wanted_spec.callback = sdl_audio_callback;
2503  wanted_spec.userdata = opaque;
2504  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2505  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2506  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2507  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2508  if (!wanted_spec.channels) {
2509  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2510  wanted_spec.channels = wanted_nb_channels;
2511  if (!wanted_spec.freq) {
2513  "No more combinations to try, audio open failed\n");
2514  return -1;
2515  }
2516  }
2517  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2518  }
2519  if (spec.format != AUDIO_S16SYS) {
2521  "SDL advised audio format %d is not supported!\n", spec.format);
2522  return -1;
2523  }
2524  if (spec.channels != wanted_spec.channels) {
2525  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2526  if (!wanted_channel_layout) {
2528  "SDL advised channel count %d is not supported!\n", spec.channels);
2529  return -1;
2530  }
2531  }
2532 
2533  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2534  audio_hw_params->freq = spec.freq;
2535  audio_hw_params->channel_layout = wanted_channel_layout;
2536  audio_hw_params->channels = spec.channels;
2537  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2538  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2539  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2540  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2541  return -1;
2542  }
2543  return spec.size;
2544 }
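/*
 * Note on audio_open() above: the requested SDL buffer is the next power of two
 * above freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC (e.g. 44100 / 30 = 1470, so
 * 2 << av_log2(1470) = 2048 samples), but never smaller than
 * SDL_AUDIO_MIN_BUFFER_SIZE. If SDL_OpenAudio() refuses the spec, the loop walks
 * through alternative channel counts and then lower standard sample rates before
 * giving up.
 */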
2545 
2546 /* open a given stream. Return 0 if OK */
2547 static int stream_component_open(VideoState *is, int stream_index)
2548 {
2549  AVFormatContext *ic = is->ic;
2550  AVCodecContext *avctx;
2551  AVCodec *codec;
2552  const char *forced_codec_name = NULL;
2553  AVDictionary *opts = NULL;
2554  AVDictionaryEntry *t = NULL;
2555  int sample_rate, nb_channels;
2556  int64_t channel_layout;
2557  int ret = 0;
2558  int stream_lowres = lowres;
2559 
2560  if (stream_index < 0 || stream_index >= ic->nb_streams)
2561  return -1;
2562 
2563  avctx = avcodec_alloc_context3(NULL);
2564  if (!avctx)
2565  return AVERROR(ENOMEM);
2566 
2567  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2568  if (ret < 0)
2569  goto fail;
2570  av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
2571 
2572  codec = avcodec_find_decoder(avctx->codec_id);
2573 
2574  switch(avctx->codec_type){
2575  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2576  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2577  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2578  }
2579  if (forced_codec_name)
2580  codec = avcodec_find_decoder_by_name(forced_codec_name);
2581  if (!codec) {
2582  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2583  "No codec could be found with name '%s'\n", forced_codec_name);
2584  else av_log(NULL, AV_LOG_WARNING,
2585  "No codec could be found with id %d\n", avctx->codec_id);
2586  ret = AVERROR(EINVAL);
2587  goto fail;
2588  }
2589 
2590  avctx->codec_id = codec->id;
2591  if(stream_lowres > av_codec_get_max_lowres(codec)){
2592  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2593  av_codec_get_max_lowres(codec));
2594  stream_lowres = av_codec_get_max_lowres(codec);
2595  }
2596  av_codec_set_lowres(avctx, stream_lowres);
2597 
2598 #if FF_API_EMU_EDGE
2599  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2600 #endif
2601  if (fast)
2602  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2603 #if FF_API_EMU_EDGE
2604  if(codec->capabilities & AV_CODEC_CAP_DR1)
2605  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2606 #endif
2607 
2608  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2609  if (!av_dict_get(opts, "threads", NULL, 0))
2610  av_dict_set(&opts, "threads", "auto", 0);
2611  if (stream_lowres)
2612  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2613  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2614  av_dict_set(&opts, "refcounted_frames", "1", 0);
2615  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2616  goto fail;
2617  }
2618  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2619  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2620  ret = AVERROR_OPTION_NOT_FOUND;
2621  goto fail;
2622  }
2623 
2624  is->eof = 0;
2625  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2626  switch (avctx->codec_type) {
2627  case AVMEDIA_TYPE_AUDIO:
2628 #if CONFIG_AVFILTER
2629  {
2630  AVFilterLink *link;
2631 
2632  is->audio_filter_src.freq = avctx->sample_rate;
2633  is->audio_filter_src.channels = avctx->channels;
2634  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2635  is->audio_filter_src.fmt = avctx->sample_fmt;
2636  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2637  goto fail;
2638  link = is->out_audio_filter->inputs[0];
2639  sample_rate = link->sample_rate;
2640  nb_channels = avfilter_link_get_channels(link);
2641  channel_layout = link->channel_layout;
2642  }
2643 #else
2644  sample_rate = avctx->sample_rate;
2645  nb_channels = avctx->channels;
2646  channel_layout = avctx->channel_layout;
2647 #endif
2648 
2649  /* prepare audio output */
2650  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2651  goto fail;
2652  is->audio_hw_buf_size = ret;
2653  is->audio_src = is->audio_tgt;
2654  is->audio_buf_size = 0;
2655  is->audio_buf_index = 0;
2656 
2657  /* init averaging filter */
2658  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2659  is->audio_diff_avg_count = 0;
2660  /* since we do not have a precise enough audio FIFO fullness measure,
2661  we correct audio sync only if the error is larger than this threshold */
2662  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2663 
2664  is->audio_stream = stream_index;
2665  is->audio_st = ic->streams[stream_index];
2666 
2667  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2668  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2669  is->auddec.start_pts = is->audio_st->start_time;
2670  is->auddec.start_pts_tb = is->audio_st->time_base;
2671  }
2672  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2673  goto out;
2674  SDL_PauseAudio(0);
2675  break;
2676  case AVMEDIA_TYPE_VIDEO:
2677  is->video_stream = stream_index;
2678  is->video_st = ic->streams[stream_index];
2679 
2680  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2681  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2682  goto out;
2683  is->queue_attachments_req = 1;
2684  break;
2685  case AVMEDIA_TYPE_SUBTITLE:
2686  is->subtitle_stream = stream_index;
2687  is->subtitle_st = ic->streams[stream_index];
2688 
2689  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2690  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2691  goto out;
2692  break;
2693  default:
2694  break;
2695  }
2696  goto out;
2697 
2698 fail:
2699  avcodec_free_context(&avctx);
2700 out:
2701  av_dict_free(&opts);
2702 
2703  return ret;
2704 }
2705 
2706 static int decode_interrupt_cb(void *ctx)
2707 {
2708  VideoState *is = ctx;
2709  return is->abort_request;
2710 }
2711 
2712 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2713  return stream_id < 0 ||
2714  queue->abort_request ||
2715  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2716  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2717 }
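/*
 * Note: a queue is considered "full enough" once it holds more than MIN_FRAMES
 * packets and, when packet durations are known, more than one second of data;
 * streams that only carry an attached picture are always treated as satisfied.
 */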
2718 
2719 static int is_realtime(AVFormatContext *s)
2720 {
2721  if( !strcmp(s->iformat->name, "rtp")
2722  || !strcmp(s->iformat->name, "rtsp")
2723  || !strcmp(s->iformat->name, "sdp")
2724  )
2725  return 1;
2726 
2727  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2728  || !strncmp(s->filename, "udp:", 4)
2729  )
2730  )
2731  return 1;
2732  return 0;
2733 }
2734 
2735 /* this thread gets the stream from the disk or the network */
2736 static int read_thread(void *arg)
2737 {
2738  VideoState *is = arg;
2739  AVFormatContext *ic = NULL;
2740  int err, i, ret;
2741  int st_index[AVMEDIA_TYPE_NB];
2742  AVPacket pkt1, *pkt = &pkt1;
2743  int64_t stream_start_time;
2744  int pkt_in_play_range = 0;
2745  AVDictionaryEntry *t;
2746  AVDictionary **opts;
2747  int orig_nb_streams;
2748  SDL_mutex *wait_mutex = SDL_CreateMutex();
2749  int scan_all_pmts_set = 0;
2750  int64_t pkt_ts;
2751 
2752  if (!wait_mutex) {
2753  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2754  ret = AVERROR(ENOMEM);
2755  goto fail;
2756  }
2757 
2758  memset(st_index, -1, sizeof(st_index));
2759  is->last_video_stream = is->video_stream = -1;
2760  is->last_audio_stream = is->audio_stream = -1;
2761  is->last_subtitle_stream = is->subtitle_stream = -1;
2762  is->eof = 0;
2763 
2764  ic = avformat_alloc_context();
2765  if (!ic) {
2766  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2767  ret = AVERROR(ENOMEM);
2768  goto fail;
2769  }
2770  ic->interrupt_callback.callback = decode_interrupt_cb;
2771  ic->interrupt_callback.opaque = is;
2772  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2773  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2774  scan_all_pmts_set = 1;
2775  }
2776  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2777  if (err < 0) {
2778  print_error(is->filename, err);
2779  ret = -1;
2780  goto fail;
2781  }
2782  if (scan_all_pmts_set)
2783  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2784 
2786  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2788  goto fail;
2789  }
2790  is->ic = ic;
2791 
2792  if (genpts)
2793  ic->flags |= AVFMT_FLAG_GENPTS;
2794 
2795  av_format_inject_global_side_data(ic);
2796 
2797  opts = setup_find_stream_info_opts(ic, codec_opts);
2798  orig_nb_streams = ic->nb_streams;
2799 
2800  err = avformat_find_stream_info(ic, opts);
2801 
2802  for (i = 0; i < orig_nb_streams; i++)
2803  av_dict_free(&opts[i]);
2804  av_freep(&opts);
2805 
2806  if (err < 0) {
2808  "%s: could not find codec parameters\n", is->filename);
2809  ret = -1;
2810  goto fail;
2811  }
2812 
2813  if (ic->pb)
2814  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2815 
2816  if (seek_by_bytes < 0)
2817  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2818 
2819  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2820 
2821  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2822  window_title = av_asprintf("%s - %s", t->value, input_filename);
2823 
2824  /* if seeking requested, we execute it */
2825  if (start_time != AV_NOPTS_VALUE) {
2826  int64_t timestamp;
2827 
2828  timestamp = start_time;
2829  /* add the stream start time */
2830  if (ic->start_time != AV_NOPTS_VALUE)
2831  timestamp += ic->start_time;
2832  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2833  if (ret < 0) {
2834  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2835  is->filename, (double)timestamp / AV_TIME_BASE);
2836  }
2837  }
2838 
2839  is->realtime = is_realtime(ic);
2840 
2841  if (show_status)
2842  av_dump_format(ic, 0, is->filename, 0);
2843 
2844  for (i = 0; i < ic->nb_streams; i++) {
2845  AVStream *st = ic->streams[i];
2846  enum AVMediaType type = st->codecpar->codec_type;
2847  st->discard = AVDISCARD_ALL;
2848  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2849  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2850  st_index[type] = i;
2851  }
2852  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2853  if (wanted_stream_spec[i] && st_index[i] == -1) {
2854  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2855  st_index[i] = INT_MAX;
2856  }
2857  }
2858 
2859  if (!video_disable)
2860  st_index[AVMEDIA_TYPE_VIDEO] =
2861  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2862  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2863  if (!audio_disable)
2864  st_index[AVMEDIA_TYPE_AUDIO] =
2865  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2866  st_index[AVMEDIA_TYPE_AUDIO],
2867  st_index[AVMEDIA_TYPE_VIDEO],
2868  NULL, 0);
2869  if (!video_disable && !subtitle_disable)
2870  st_index[AVMEDIA_TYPE_SUBTITLE] =
2871  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2872  st_index[AVMEDIA_TYPE_SUBTITLE],
2873  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2874  st_index[AVMEDIA_TYPE_AUDIO] :
2875  st_index[AVMEDIA_TYPE_VIDEO]),
2876  NULL, 0);
2877 
2878  is->show_mode = show_mode;
2879  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2880  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2881  AVCodecParameters *codecpar = st->codecpar;
2882  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2883  if (codecpar->width)
2884  set_default_window_size(codecpar->width, codecpar->height, sar);
2885  }
2886 
2887  /* open the streams */
2888  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2889  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2890  }
2891 
2892  ret = -1;
2893  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2894  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2895  }
2896  if (is->show_mode == SHOW_MODE_NONE)
2897  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2898 
2899  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2900  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2901  }
2902 
2903  if (is->video_stream < 0 && is->audio_stream < 0) {
2904  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2905  is->filename);
2906  ret = -1;
2907  goto fail;
2908  }
2909 
2910  if (infinite_buffer < 0 && is->realtime)
2911  infinite_buffer = 1;
2912 
2913  for (;;) {
2914  if (is->abort_request)
2915  break;
2916  if (is->paused != is->last_paused) {
2917  is->last_paused = is->paused;
2918  if (is->paused)
2919  is->read_pause_return = av_read_pause(ic);
2920  else
2921  av_read_play(ic);
2922  }
2923 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2924  if (is->paused &&
2925  (!strcmp(ic->iformat->name, "rtsp") ||
2926  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2927  /* wait 10 ms to avoid trying to get another packet */
2928  /* XXX: horrible */
2929  SDL_Delay(10);
2930  continue;
2931  }
2932 #endif
2933  if (is->seek_req) {
2934  int64_t seek_target = is->seek_pos;
2935  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2936  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2937 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2938 // of the seek_pos/seek_rel variables
2939 
2940  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2941  if (ret < 0) {
2943  "%s: error while seeking\n", is->ic->filename);
2944  } else {
2945  if (is->audio_stream >= 0) {
2946  packet_queue_flush(&is->audioq);
2947  packet_queue_put(&is->audioq, &flush_pkt);
2948  }
2949  if (is->subtitle_stream >= 0) {
2950  packet_queue_flush(&is->subtitleq);
2951  packet_queue_put(&is->subtitleq, &flush_pkt);
2952  }
2953  if (is->video_stream >= 0) {
2954  packet_queue_flush(&is->videoq);
2955  packet_queue_put(&is->videoq, &flush_pkt);
2956  }
2957  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2958  set_clock(&is->extclk, NAN, 0);
2959  } else {
2960  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2961  }
2962  }
2963  is->seek_req = 0;
2964  is->queue_attachments_req = 1;
2965  is->eof = 0;
2966  if (is->paused)
2967  step_to_next_frame(is);
2968  }
2969  if (is->queue_attachments_req) {
2970  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2971  AVPacket copy;
2972  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
2973  goto fail;
2974  packet_queue_put(&is->videoq, &copy);
2975  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2976  }
2977  is->queue_attachments_req = 0;
2978  }
2979 
2980  /* if the queues are full, no need to read more */
2981  if (infinite_buffer<1 &&
2982  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2983  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2984  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2985  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2986  /* wait 10 ms */
2987  SDL_LockMutex(wait_mutex);
2988  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2989  SDL_UnlockMutex(wait_mutex);
2990  continue;
2991  }
2992  if (!is->paused &&
2993  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2994  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2995  if (loop != 1 && (!loop || --loop)) {
2996  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2997  } else if (autoexit) {
2998  ret = AVERROR_EOF;
2999  goto fail;
3000  }
3001  }
3002  ret = av_read_frame(ic, pkt);
3003  if (ret < 0) {
3004  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3005  if (is->video_stream >= 0)
3006  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3007  if (is->audio_stream >= 0)
3008  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3009  if (is->subtitle_stream >= 0)
3010  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3011  is->eof = 1;
3012  }
3013  if (ic->pb && ic->pb->error)
3014  break;
3015  SDL_LockMutex(wait_mutex);
3016  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3017  SDL_UnlockMutex(wait_mutex);
3018  continue;
3019  } else {
3020  is->eof = 0;
3021  }
3022  /* check if packet is in play range specified by user, then queue, otherwise discard */
3023  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3024  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3025  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3026  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3027  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3028  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3029  <= ((double)duration / 1000000);
3030  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3031  packet_queue_put(&is->audioq, pkt);
3032  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3033  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3034  packet_queue_put(&is->videoq, pkt);
3035  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3036  packet_queue_put(&is->subtitleq, pkt);
3037  } else {
3038  av_packet_unref(pkt);
3039  }
3040  }
3041 
3042  ret = 0;
3043  fail:
3044  if (ic && !is->ic)
3045  avformat_close_input(&ic);
3046 
3047  if (ret != 0) {
3048  SDL_Event event;
3049 
3050  event.type = FF_QUIT_EVENT;
3051  event.user.data1 = is;
3052  SDL_PushEvent(&event);
3053  }
3054  SDL_DestroyMutex(wait_mutex);
3055  return 0;
3056 }
3057 
3058 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3059 {
3060  VideoState *is;
3061 
3062  is = av_mallocz(sizeof(VideoState));
3063  if (!is)
3064  return NULL;
3065  is->filename = av_strdup(filename);
3066  if (!is->filename)
3067  goto fail;
3068  is->iformat = iformat;
3069  is->ytop = 0;
3070  is->xleft = 0;
3071 
3072  /* start video display */
3073  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3074  goto fail;
3075  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3076  goto fail;
3077  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3078  goto fail;
3079 
3080  if (packet_queue_init(&is->videoq) < 0 ||
3081  packet_queue_init(&is->audioq) < 0 ||
3082  packet_queue_init(&is->subtitleq) < 0)
3083  goto fail;
3084 
3085  if (!(is->continue_read_thread = SDL_CreateCond())) {
3086  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3087  goto fail;
3088  }
3089 
3090  init_clock(&is->vidclk, &is->videoq.serial);
3091  init_clock(&is->audclk, &is->audioq.serial);
3092  init_clock(&is->extclk, &is->extclk.serial);
3093  is->audio_clock_serial = -1;
3094  is->audio_volume = SDL_MIX_MAXVOLUME;
3095  is->muted = 0;
3096  is->av_sync_type = av_sync_type;
3097  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3098  if (!is->read_tid) {
3099  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3100 fail:
3101  stream_close(is);
3102  return NULL;
3103  }
3104  return is;
3105 }
3106 
3107 static void stream_cycle_channel(VideoState *is, int codec_type)
3108 {
3109  AVFormatContext *ic = is->ic;
3110  int start_index, stream_index;
3111  int old_index;
3112  AVStream *st;
3113  AVProgram *p = NULL;
3114  int nb_streams = is->ic->nb_streams;
3115 
3116  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3117  start_index = is->last_video_stream;
3118  old_index = is->video_stream;
3119  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3120  start_index = is->last_audio_stream;
3121  old_index = is->audio_stream;
3122  } else {
3123  start_index = is->last_subtitle_stream;
3124  old_index = is->subtitle_stream;
3125  }
3126  stream_index = start_index;
3127 
3128  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3129  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3130  if (p) {
3131  nb_streams = p->nb_stream_indexes;
3132  for (start_index = 0; start_index < nb_streams; start_index++)
3133  if (p->stream_index[start_index] == stream_index)
3134  break;
3135  if (start_index == nb_streams)
3136  start_index = -1;
3137  stream_index = start_index;
3138  }
3139  }
3140 
3141  for (;;) {
3142  if (++stream_index >= nb_streams)
3143  {
3144  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3145  {
3146  stream_index = -1;
3147  is->last_subtitle_stream = -1;
3148  goto the_end;
3149  }
3150  if (start_index == -1)
3151  return;
3152  stream_index = 0;
3153  }
3154  if (stream_index == start_index)
3155  return;
3156  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3157  if (st->codecpar->codec_type == codec_type) {
3158  /* check that parameters are OK */
3159  switch (codec_type) {
3160  case AVMEDIA_TYPE_AUDIO:
3161  if (st->codecpar->sample_rate != 0 &&
3162  st->codecpar->channels != 0)
3163  goto the_end;
3164  break;
3165  case AVMEDIA_TYPE_VIDEO:
3166  case AVMEDIA_TYPE_SUBTITLE:
3167  goto the_end;
3168  default:
3169  break;
3170  }
3171  }
3172  }
3173  the_end:
3174  if (p && stream_index != -1)
3175  stream_index = p->stream_index[stream_index];
3176  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3177  av_get_media_type_string(codec_type),
3178  old_index,
3179  stream_index);
3180 
3181  stream_component_close(is, old_index);
3182  stream_component_open(is, stream_index);
3183 }
3184 
3185 
3186 static void toggle_full_screen(VideoState *is)
3187 {
3188  is_full_screen = !is_full_screen;
3189  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3190 }
3191 
3192 static void toggle_audio_display(VideoState *is)
3193 {
3194  int next = is->show_mode;
3195  do {
3196  next = (next + 1) % SHOW_MODE_NB;
3197  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3198  if (is->show_mode != next) {
3199  is->force_refresh = 1;
3200  is->show_mode = next;
3201  }
3202 }
3203 
3204 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3205  double remaining_time = 0.0;
3206  SDL_PumpEvents();
3207  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3208  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3209  SDL_ShowCursor(0);
3210  cursor_hidden = 1;
3211  }
3212  if (remaining_time > 0.0)
3213  av_usleep((int64_t)(remaining_time * 1000000.0));
3214  remaining_time = REFRESH_RATE;
3215  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3216  video_refresh(is, &remaining_time);
3217  SDL_PumpEvents();
3218  }
3219 }
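/*
 * Note on refresh_loop_wait_event() above: this is the player's pacing loop; it
 * polls the SDL event queue, hides the mouse cursor after CURSOR_HIDE_DELAY of
 * inactivity, and sleeps for the remaining_time that video_refresh() reports
 * (REFRESH_RATE by default) so the next picture is presented on schedule.
 */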
3220 
3221 static void seek_chapter(VideoState *is, int incr)
3222 {
3223  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3224  int i;
3225 
3226  if (!is->ic->nb_chapters)
3227  return;
3228 
3229  /* find the current chapter */
3230  for (i = 0; i < is->ic->nb_chapters; i++) {
3231  AVChapter *ch = is->ic->chapters[i];
3232  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3233  i--;
3234  break;
3235  }
3236  }
3237 
3238  i += incr;
3239  i = FFMAX(i, 0);
3240  if (i >= is->ic->nb_chapters)
3241  return;
3242 
3243  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3244  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3245  AV_TIME_BASE_Q), 0, 0);
3246 }
3247 
3248 /* handle an event sent by the GUI */
3249 static void event_loop(VideoState *cur_stream)
3250 {
3251  SDL_Event event;
3252  double incr, pos, frac;
3253 
3254  for (;;) {
3255  double x;
3256  refresh_loop_wait_event(cur_stream, &event);
3257  switch (event.type) {
3258  case SDL_KEYDOWN:
3259  if (exit_on_keydown) {
3260  do_exit(cur_stream);
3261  break;
3262  }
3263  switch (event.key.keysym.sym) {
3264  case SDLK_ESCAPE:
3265  case SDLK_q:
3266  do_exit(cur_stream);
3267  break;
3268  case SDLK_f:
3269  toggle_full_screen(cur_stream);
3270  cur_stream->force_refresh = 1;
3271  break;
3272  case SDLK_p:
3273  case SDLK_SPACE:
3274  toggle_pause(cur_stream);
3275  break;
3276  case SDLK_m:
3277  toggle_mute(cur_stream);
3278  break;
3279  case SDLK_KP_MULTIPLY:
3280  case SDLK_0:
3281  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3282  break;
3283  case SDLK_KP_DIVIDE:
3284  case SDLK_9:
3285  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3286  break;
3287  case SDLK_s: // S: Step to next frame
3288  step_to_next_frame(cur_stream);
3289  break;
3290  case SDLK_a:
3291  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3292  break;
3293  case SDLK_v:
3294  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3295  break;
3296  case SDLK_c:
3297  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3298  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3299  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3300  break;
3301  case SDLK_t:
3302  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3303  break;
3304  case SDLK_w:
3305 #if CONFIG_AVFILTER
3306  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3307  if (++cur_stream->vfilter_idx >= nb_vfilters)
3308  cur_stream->vfilter_idx = 0;
3309  } else {
3310  cur_stream->vfilter_idx = 0;
3311  toggle_audio_display(cur_stream);
3312  }
3313 #else
3314  toggle_audio_display(cur_stream);
3315 #endif
3316  break;
3317  case SDLK_PAGEUP:
3318  if (cur_stream->ic->nb_chapters <= 1) {
3319  incr = 600.0;
3320  goto do_seek;
3321  }
3322  seek_chapter(cur_stream, 1);
3323  break;
3324  case SDLK_PAGEDOWN:
3325  if (cur_stream->ic->nb_chapters <= 1) {
3326  incr = -600.0;
3327  goto do_seek;
3328  }
3329  seek_chapter(cur_stream, -1);
3330  break;
3331  case SDLK_LEFT:
3332  incr = -10.0;
3333  goto do_seek;
3334  case SDLK_RIGHT:
3335  incr = 10.0;
3336  goto do_seek;
3337  case SDLK_UP:
3338  incr = 60.0;
3339  goto do_seek;
3340  case SDLK_DOWN:
3341  incr = -60.0;
3342  do_seek:
3343  if (seek_by_bytes) {
3344  pos = -1;
3345  if (pos < 0 && cur_stream->video_stream >= 0)
3346  pos = frame_queue_last_pos(&cur_stream->pictq);
3347  if (pos < 0 && cur_stream->audio_stream >= 0)
3348  pos = frame_queue_last_pos(&cur_stream->sampq);
3349  if (pos < 0)
3350  pos = avio_tell(cur_stream->ic->pb);
3351  if (cur_stream->ic->bit_rate)
3352  incr *= cur_stream->ic->bit_rate / 8.0;
3353  else
3354  incr *= 180000.0;
3355  pos += incr;
3356  stream_seek(cur_stream, pos, incr, 1);
3357  } else {
3358  pos = get_master_clock(cur_stream);
3359  if (isnan(pos))
3360  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3361  pos += incr;
3362  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3363  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3364  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3365  }
3366  break;
3367  default:
3368  break;
3369  }
3370  break;
3371  case SDL_MOUSEBUTTONDOWN:
3372  if (exit_on_mousedown) {
3373  do_exit(cur_stream);
3374  break;
3375  }
3376  if (event.button.button == SDL_BUTTON_LEFT) {
3377  static int64_t last_mouse_left_click = 0;
3378  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3379  toggle_full_screen(cur_stream);
3380  cur_stream->force_refresh = 1;
3381  last_mouse_left_click = 0;
3382  } else {
3383  last_mouse_left_click = av_gettime_relative();
3384  }
3385  }
3386  case SDL_MOUSEMOTION:
3387  if (cursor_hidden) {
3388  SDL_ShowCursor(1);
3389  cursor_hidden = 0;
3390  }
3391  cursor_last_shown = av_gettime_relative();
3392  if (event.type == SDL_MOUSEBUTTONDOWN) {
3393  if (event.button.button != SDL_BUTTON_RIGHT)
3394  break;
3395  x = event.button.x;
3396  } else {
3397  if (!(event.motion.state & SDL_BUTTON_RMASK))
3398  break;
3399  x = event.motion.x;
3400  }
3401  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3402  uint64_t size = avio_size(cur_stream->ic->pb);
3403  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3404  } else {
3405  int64_t ts;
3406  int ns, hh, mm, ss;
3407  int tns, thh, tmm, tss;
3408  tns = cur_stream->ic->duration / 1000000LL;
3409  thh = tns / 3600;
3410  tmm = (tns % 3600) / 60;
3411  tss = (tns % 60);
3412  frac = x / cur_stream->width;
3413  ns = frac * tns;
3414  hh = ns / 3600;
3415  mm = (ns % 3600) / 60;
3416  ss = (ns % 60);
3418  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3419  hh, mm, ss, thh, tmm, tss);
3420  ts = frac * cur_stream->ic->duration;
3421  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3422  ts += cur_stream->ic->start_time;
3423  stream_seek(cur_stream, ts, 0, 0);
3424  }
3425  break;
3426  case SDL_WINDOWEVENT:
3427  switch (event.window.event) {
3428  case SDL_WINDOWEVENT_RESIZED:
3429  screen_width = cur_stream->width = event.window.data1;
3430  screen_height = cur_stream->height = event.window.data2;
3431  if (cur_stream->vis_texture) {
3432  SDL_DestroyTexture(cur_stream->vis_texture);
3433  cur_stream->vis_texture = NULL;
3434  }
3435  case SDL_WINDOWEVENT_EXPOSED:
3436  cur_stream->force_refresh = 1;
3437  }
3438  break;
3439  case SDL_QUIT:
3440  case FF_QUIT_EVENT:
3441  do_exit(cur_stream);
3442  break;
3443  case FF_ALLOC_EVENT:
3444  alloc_picture(event.user.data1);
3445  break;
3446  default:
3447  break;
3448  }
3449  }
3450 }
3451 
3452 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3453 {
3454  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3455  return opt_default(NULL, "video_size", arg);
3456 }
3457 
3458 static int opt_width(void *optctx, const char *opt, const char *arg)
3459 {
3460  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3461  return 0;
3462 }
3463 
3464 static int opt_height(void *optctx, const char *opt, const char *arg)
3465 {
3466  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3467  return 0;
3468 }
3469 
3470 static int opt_format(void *optctx, const char *opt, const char *arg)
3471 {
3472  file_iformat = av_find_input_format(arg);
3473  if (!file_iformat) {
3474  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3475  return AVERROR(EINVAL);
3476  }
3477  return 0;
3478 }
3479 
3480 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3481 {
3482  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3483  return opt_default(NULL, "pixel_format", arg);
3484 }
3485 
3486 static int opt_sync(void *optctx, const char *opt, const char *arg)
3487 {
3488  if (!strcmp(arg, "audio"))
3490  else if (!strcmp(arg, "video"))
3492  else if (!strcmp(arg, "ext"))
3494  else {
3495  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3496  exit(1);
3497  }
3498  return 0;
3499 }
3500 
3501 static int opt_seek(void *optctx, const char *opt, const char *arg)
3502 {
3503  start_time = parse_time_or_die(opt, arg, 1);
3504  return 0;
3505 }
3506 
3507 static int opt_duration(void *optctx, const char *opt, const char *arg)
3508 {
3509  duration = parse_time_or_die(opt, arg, 1);
3510  return 0;
3511 }
3512 
3513 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3514 {
3515  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3516  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3517  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3518  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3519  return 0;
3520 }
3521 
3522 static void opt_input_file(void *optctx, const char *filename)
3523 {
3524  if (input_filename) {
3526  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3527  filename, input_filename);
3528  exit(1);
3529  }
3530  if (!strcmp(filename, "-"))
3531  filename = "pipe:";
3532  input_filename = filename;
3533 }
3534 
3535 static int opt_codec(void *optctx, const char *opt, const char *arg)
3536 {
3537  const char *spec = strchr(opt, ':');
3538  if (!spec) {
3540  "No media specifier was specified in '%s' in option '%s'\n",
3541  arg, opt);
3542  return AVERROR(EINVAL);
3543  }
3544  spec++;
3545  switch (spec[0]) {
3546  case 'a' : audio_codec_name = arg; break;
3547  case 's' : subtitle_codec_name = arg; break;
3548  case 'v' : video_codec_name = arg; break;
3549  default:
3551  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3552  return AVERROR(EINVAL);
3553  }
3554  return 0;
3555 }
3556 
3557 static int dummy;
3558 
3559 static const OptionDef options[] = {
3560 #include "cmdutils_common_opts.h"
3561  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3562  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3563  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3564  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3565  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3566  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3567  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3568  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3569  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3570  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3571  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3572  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3573  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3574  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3575  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3576  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3577  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3578  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3579  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3580  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3581  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3582  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3583  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3584  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3585  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3586  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3587  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3588  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3589  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3590 #if CONFIG_AVFILTER
3591  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3592  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3593 #endif
3594  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3595  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3596  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3597  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3598  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3599  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3600  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3601  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3602  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3603  { NULL, },
3604 };
3605 
3606 static void show_usage(void)
3607 {
3608  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3609  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3610  av_log(NULL, AV_LOG_INFO, "\n");
3611 }
3612 
3613 void show_help_default(const char *opt, const char *arg)
3614 {
3615  av_log_set_callback(log_callback_help);
3616  show_usage();
3617  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3618  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3619  printf("\n");
3620  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3621  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3622 #if !CONFIG_AVFILTER
3623  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3624 #else
3625  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3626 #endif
3627  printf("\nWhile playing:\n"
3628  "q, ESC quit\n"
3629  "f toggle full screen\n"
3630  "p, SPC pause\n"
3631  "m toggle mute\n"
3632  "9, 0 decrease and increase volume respectively\n"
3633  "/, * decrease and increase volume respectively\n"
3634  "a cycle audio channel in the current program\n"
3635  "v cycle video channel\n"
3636  "t cycle subtitle channel in the current program\n"
3637  "c cycle program\n"
3638  "w cycle video filters or show modes\n"
3639  "s activate frame-step mode\n"
3640  "left/right seek backward/forward 10 seconds\n"
3641  "down/up seek backward/forward 1 minute\n"
3642  "page down/page up seek backward/forward 10 minutes\n"
3643  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3644  "left double-click toggle full screen\n"
3645  );
3646 }
3647 
3648 static int lockmgr(void **mtx, enum AVLockOp op)
3649 {
3650  switch(op) {
3651  case AV_LOCK_CREATE:
3652  *mtx = SDL_CreateMutex();
3653  if(!*mtx) {
3654  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
3655  return 1;
3656  }
3657  return 0;
3658  case AV_LOCK_OBTAIN:
3659  return !!SDL_LockMutex(*mtx);
3660  case AV_LOCK_RELEASE:
3661  return !!SDL_UnlockMutex(*mtx);
3662  case AV_LOCK_DESTROY:
3663  SDL_DestroyMutex(*mtx);
3664  return 0;
3665  }
3666  return 1;
3667 }
3668 
3669 /* Called from the main */
3670 int main(int argc, char **argv)
3671 {
3672  int flags;
3673  VideoState *is;
3674 
3675  init_dynload();
3676 
3677  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3678  parse_loglevel(argc, argv, options);
3679 
3680  /* register all codecs, demux and protocols */
3681 #if CONFIG_AVDEVICE
3682  avdevice_register_all();
3683 #endif
3684 #if CONFIG_AVFILTER
3685  avfilter_register_all();
3686 #endif
3687  av_register_all();
3688  avformat_network_init();
3689 
3690  init_opts();
3691 
3692  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3693  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3694 
3695  show_banner(argc, argv, options);
3696 
3697  parse_options(NULL, argc, argv, options, opt_input_file);
3698 
3699  if (!input_filename) {
3700  show_usage();
3701  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3703  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3704  exit(1);
3705  }
3706 
3707  if (display_disable) {
3708  video_disable = 1;
3709  }
3710  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3711  if (audio_disable)
3712  flags &= ~SDL_INIT_AUDIO;
3713  else {
3714  /* Try to work around an occasional ALSA buffer underflow issue when the
3715  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3716  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3717  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3718  }
3719  if (display_disable)
3720  flags &= ~SDL_INIT_VIDEO;
3721  if (SDL_Init (flags)) {
3722  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3723  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3724  exit(1);
3725  }
3726 
3727  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3728  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3729 
3731  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3732  do_exit(NULL);
3733  }
3734 
3735  av_init_packet(&flush_pkt);
3736  flush_pkt.data = (uint8_t *)&flush_pkt;
3737 
3738  is = stream_open(input_filename, file_iformat);
3739  if (!is) {
3740  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3741  do_exit(NULL);
3742  }
3743 
3744  event_loop(is);
3745 
3746  /* never returns */
3747 
3748  return 0;
3749 }
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:187
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:4056
int seek_flags
Definition: ffplay.c:214
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:972
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:708
int serial
Definition: ffplay.c:124
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4736
static int64_t cur_time
Definition: ffserver.c:262
#define OPT_AUDIO
Definition: cmdutils.h:171
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3107
void av_codec_set_pkt_timebase(AVCodecContext *avctx, AVRational val)
int num
Numerator.
Definition: rational.h:59
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3480
int size
Definition: avcodec.h:1602
const char * b
Definition: vf_curves.c:113
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1422
#define DEBUG
Definition: vf_framerate.c:29
MyAVPacketList * first_pkt
Definition: ffplay.c:119
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1408
int av_log2(unsigned v)
Definition: intmath.c:26
static int seek_by_bytes
Definition: ffplay.c:322
double audio_diff_cum
Definition: ffplay.c:239
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:100
AVInputFormat * iformat
Definition: ffplay.c:207
enum AVMediaType codec_type
Definition: rtp.c:37
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1440
AVCodecContext * avctx
Definition: ffplay.c:193
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1145
int paused
Definition: ffplay.c:210
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3535
static AVStream * video_stream
static unsigned sws_flags
Definition: ffplay.c:110
int abort_request
Definition: ffplay.c:123
AVS_VideoFrame int int int int new_height
Definition: avisynth_c.h:818
attribute_deprecated int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2316
unsigned num_rects
Definition: avcodec.h:3960
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1322
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1435
int out_size
Definition: movenc.c:55
double audio_diff_threshold
Definition: ffplay.c:241
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:517
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:252
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
discard all
Definition: avcodec.h:787
int64_t channel_layout
Definition: ffplay.c:137
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
static int audio_disable
Definition: ffplay.c:318
AVStream * audio_st
Definition: ffplay.c:243
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2643
static const char * audio_codec_name
Definition: ffplay.c:339
#define fn(a)
int serial
Definition: ffplay.c:157
AVCodec.
Definition: avcodec.h:3600
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3221
double pts_drift
Definition: ffplay.c:145
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1983
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3972
AVLockOp
Lock operation used by lockmgr.
Definition: avcodec.h:6146
int width
Definition: ffplay.c:163
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:791
AVStream * video_st
Definition: ffplay.c:285
Clock extclk
Definition: ffplay.c:223
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3058
void * opaque
Definition: avio.h:52
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:288
AVSubtitleRect ** rects
Definition: avcodec.h:3961
Format I/O context.
Definition: avformat.h:1338
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3192
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:4759
Definition: ffplay.c:154
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:279
static int16_t block[64]
Definition: dct.c:113
int av_sync_type
Definition: ffplay.c:235
unsigned int nb_stream_indexes
Definition: avformat.h:1270
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:177
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3924
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:315
double pts
Definition: ffplay.c:158
static AVFilter ** last_filter
Definition: avfilter.c:514
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:240
AVRational start_pts_tb
Definition: ffplay.c:199
static int read_thread(void *arg)
Definition: ffplay.c:2736
int keep_last
Definition: ffplay.c:176
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:133
int rdft_bits
Definition: ffplay.c:270
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:890
int size
Definition: ffplay.c:121
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:691
static int64_t start_time
Definition: ffplay.c:326
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2446
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:92
Lock the mutex.
Definition: avcodec.h:6148
uint8_t
static int nb_streams
Definition: ffprobe.c:254
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:145
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:539
static int default_width
Definition: ffplay.c:314
int last_video_stream
Definition: ffplay.c:305
int width
Video only.
Definition: avcodec.h:4046
int last_subtitle_stream
Definition: ffplay.c:305
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:73
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:678
#define HAS_ARG
Definition: cmdutils.h:166
int audio_hw_buf_size
Definition: ffplay.c:245
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1122
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2706
int64_t duration
Definition: ffplay.c:122
struct SwrContext * swr_ctx
Definition: ffplay.c:259
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1619
int finished
Definition: ffplay.c:195
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3249
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:374
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:4780
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:268
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:484
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1450
static int framedrop
Definition: ffplay.c:336
SDL_Texture * vis_texture
Definition: ffplay.c:274
static void alloc_picture(VideoState *is)
Definition: ffplay.c:1668
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:78
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1406
SDL_Texture * bmp
Definition: ffplay.c:161
AVPacket pkt
Definition: ffplay.c:113
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:4223
int bytes_per_sec
Definition: ffplay.c:140
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:132
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
void av_codec_set_lowres(AVCodecContext *avctx, int val)
static int64_t audio_callback_time
Definition: ffplay.c:354
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:385
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1449
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:511
static void sigterm_handler(int sig)
Definition: ffplay.c:1236
uint8_t * data
Definition: avcodec.h:1601
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:377
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int freq
Definition: ffplay.c:135
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4671
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:91
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:171
Definition: mxfdec.c:271
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:143
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:156
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:500
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3925
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:511
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:170
static int64_t duration
Definition: ffplay.c:327
AVRational sar
Definition: ffplay.c:166
AVPacket pkt_temp
Definition: ffplay.c:191
unsigned int * stream_index
Definition: avformat.h:1269
#define av_log(a,...)
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:275
PacketQueue videoq
Definition: ffplay.c:286
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2040
AVDictionary * format_opts
Definition: cmdutils.c:72
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:807
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:302
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1441
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:511
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:3934
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3614
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:3917
int audio_diff_avg_count
Definition: ffplay.c:242
int ytop
Definition: ffplay.c:293
int width
width and height of the video frame
Definition: frame.h:236
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1554
int seek_req
Definition: ffplay.c:213
int(* callback)(void *)
Definition: avio.h:51
Create a mutex.
Definition: avcodec.h:6147
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:132
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1508
int read_pause_return
Definition: ffplay.c:217
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:488
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:292
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3923
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:748
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:178
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:158
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:67
static int decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2094
RDFTContext * rdft
Definition: ffplay.c:269
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:764
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:96
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:438
static int autorotate
Definition: ffplay.c:350
int capabilities
Codec capabilities.
Definition: avcodec.h:3619
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:3998
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1459
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3976
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1771
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:539
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:395
AVChapter ** chapters
Definition: avformat.h:1544
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:359
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1179
int video_stream
Definition: ffplay.c:284
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int * queue_serial
Definition: ffplay.c:150
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1342
int xpos
Definition: ffplay.c:272
int channels
Definition: ffplay.c:136
static enum ShowMode show_mode
Definition: ffplay.c:338
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1265
#define FFMAX(a, b)
Definition: common.h:94
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:502
static const OptionDef options[]
Definition: ffplay.c:3559
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:128
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3557
#define fail()
Definition: checkasm.h:83
int8_t exp
Definition: eval.c:64
double audio_clock
Definition: ffplay.c:237
static const int sample_rates[]
Definition: dcaenc.h:32
int force_refresh
Definition: ffplay.c:209
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2489
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVDictionary * sws_dict
Definition: cmdutils.c:70
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3486
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2334
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2249
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3959
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:639
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3962
static int genpts
Definition: ffplay.c:329
static AVPacket flush_pkt
Definition: ffplay.c:356
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:353
double frame_last_returned_time
Definition: ffplay.c:282
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:519
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:499
static const char * subtitle_codec_name
Definition: ffplay.c:340
static int subtitle_disable
Definition: ffplay.c:320
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:135
int max_size
Definition: ffplay.c:175
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1394
int step
Definition: ffplay.c:294
SDL_Thread * decoder_tid
Definition: ffplay.c:202
AVDictionary * opts
Definition: movenc.c:50
static SDL_Window * window
Definition: ffplay.c:361
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4007
SDL_mutex * mutex
Definition: ffplay.c:125
static av_const double hypot(double x, double y)
Definition: libm.h:366
int audio_write_buf_size
Definition: ffplay.c:251
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:164
int linesize[4]
Definition: avcodec.h:3940
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:131
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:130
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:258
struct MyAVPacketList * next
Definition: ffplay.c:114
#define AV_CH_LAYOUT_STEREO_DOWNMIX
char filename[1024]
input or output filename
Definition: avformat.h:1414
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:248
#define FFMIN(a, b)
Definition: common.h:96
SDL_mutex * mutex
Definition: ffplay.c:178
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:406
int windex
Definition: ffplay.c:173
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:556
static int cursor_hidden
Definition: ffplay.c:344
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:549
#define width
AVSubtitle sub
Definition: ffplay.c:156
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3648
int width
picture width / height.
Definition: avcodec.h:1863
int main(int argc, char **argv)
Definition: ffplay.c:3670
int height
Definition: ffplay.c:164
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
static void show_usage(void)
Definition: ffplay.c:3606
int nb_packets
Definition: ffplay.c:120
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3458
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1761
int frame_drops_late
Definition: ffplay.c:261
struct AudioParams audio_src
Definition: ffplay.c:254
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3204
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1336
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFormatContext * ctx
Definition: movenc.c:48
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:328
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2274
int last_i_start
Definition: ffplay.c:268
uint16_t format
Definition: avcodec.h:3957
#define OPT_INT64
Definition: cmdutils.h:175
MyAVPacketList * last_pkt
Definition: ffplay.c:119
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1451
int n
Definition: avisynth_c.h:684
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2317
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:353
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:788
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2712
static const AVFilterPad outputs[]
Definition: af_afftfilt.c:386
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:449
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3939
static int decoder_reorder_pts
Definition: ffplay.c:331
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1330
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:266
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:966
int paused
Definition: ffplay.c:149
static const char * input_filename
Definition: ffplay.c:312
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:859
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:701
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3613
int av_codec_get_max_lowres(const AVCodec *codec)
Definition: utils.c:1191
int64_t pos
Definition: ffplay.c:160
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:305
Stream structure.
Definition: avformat.h:889
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3435
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1239
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:961
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:248
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4684
char * filename
Definition: ffplay.c:292
static int screen_height
Definition: ffplay.c:317
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3507
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:231
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
static const AVFilterPad inputs[]
Definition: af_afftfilt.c:376
int64_t next_pts
Definition: ffplay.c:200
static int autoexit
Definition: ffplay.c:332
AVFrame * frame
Definition: ffplay.c:155
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:267
int serial
Definition: ffplay.c:148
int uploaded
Definition: ffplay.c:167
enum AVMediaType codec_type
Definition: avcodec.h:1684
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:756
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:829
attribute_deprecated int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2213
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
enum AVCodecID codec_id
Definition: avcodec.h:1693
static void do_exit(VideoState *is)
Definition: ffplay.c:1214
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:254
int sample_rate
samples per second
Definition: avcodec.h:2438
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:215
AVIOContext * pb
I/O context.
Definition: avformat.h:1380
#define ss
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:901
static int loop
Definition: ffplay.c:335
int last_paused
Definition: ffplay.c:211
static int exit_on_keydown
Definition: ffplay.c:333
FFT functions.
main external API structure.
Definition: avcodec.h:1676
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:3127
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:567
Decoder subdec
Definition: ffplay.c:231
int av_copy_packet(AVPacket *dst, const AVPacket *src)
Copy packet, including contents.
Definition: avpacket.c:264
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:140
double max_frame_duration
Definition: ffplay.c:287
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2748
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:263
Clock vidclk
Definition: ffplay.c:222
int x
Definition: f_ebur128.c:91
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:753
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_log(ac->avr, AV_LOG_TRACE,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:493
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1489
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:313
double pts
Definition: ffplay.c:144
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:696
static int audio_thread(void *arg)
Definition: ffplay.c:2007
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
static int av_sync_type
Definition: ffplay.c:325
int pkt_serial
Definition: ffplay.c:194
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:684
static const char * format
Definition: movenc.c:47
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:118
int sample_rate
Sample rate of the audio data.
Definition: frame.h:348
int configure_filtergraph(FilterGraph *fg)
static void free_picture(Frame *vp)
Definition: ffplay.c:799
int av_frame_get_channels(const AVFrame *frame)
Definition: f_ebur128.c:91
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:969
PacketQueue audioq
Definition: ffplay.c:244
int packet_pending
Definition: ffplay.c:196
static int video_open(VideoState *is, Frame *vp)
Definition: ffplay.c:1249
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
int64_t seek_pos
Definition: ffplay.c:215
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
#define isnan(x)
Definition: libm.h:340
int allocated
Definition: ffplay.c:162
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:289
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:276
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:158
#define OPT_STRING
Definition: cmdutils.h:169
static void video_audio_display(VideoState *s)
Definition: ffplay.c:980
SDL_cond * cond
Definition: ffplay.c:126
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2387
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:557
AVMediaType
Definition: avutil.h:193
discard useless packets like 0 size packets in avi
Definition: avcodec.h:782
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2719
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1393
int queue_attachments_req
Definition: ffplay.c:212
unsigned nb_filters
Definition: avfilter.h:789
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1241
#define snprintf
Definition: snprintf.h:34
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:644
int error
contains the error code or 0 if no error happened
Definition: avio.h:228
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:707
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:197
#define FF_ALLOC_EVENT
Definition: ffplay.c:358
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1676
int audio_stream
Definition: ffplay.c:233
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:262
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2435
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:137
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2547
char * name
unique name for this input/output in the list
Definition: avfilter.h:963
static int64_t cursor_last_shown
Definition: ffplay.c:343
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:650
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3452
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:493
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:79
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: utils.c:3132
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1698
static int flags
Definition: cpu.c:47
SDL_Texture * sub_texture
Definition: ffplay.c:275
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1423
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:198
int frame_drops_early
Definition: ffplay.c:260
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2269
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
int sample_array_index
Definition: ffplay.c:267
SDL_cond * continue_read_thread
Definition: ffplay.c:307
int64_t start
Definition: avformat.h:1298
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:671
int sample_rate
Audio only.
Definition: avcodec.h:4090
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:770
#define OPT_BOOL
Definition: cmdutils.h:167
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:284
double speed
Definition: ffplay.c:147
static int exit_on_mousedown
Definition: ffplay.c:334
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78
#define CODEC_FLAG_EMU_EDGE
Definition: avcodec.h:1097
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1046
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static int video_thread(void *arg)
Definition: ffplay.c:2105
#define OPT_INT
Definition: cmdutils.h:172
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:182
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1241
AVDictionary * codec_opts
Definition: cmdutils.c:72
struct AudioParams audio_tgt
Definition: ffplay.c:258
AVRational av_codec_get_pkt_timebase(const AVCodecContext *avctx)
Free mutex resources.
Definition: avcodec.h:6150
if(ret< 0)
Definition: vf_mcdeint.c:282
uint8_t * audio_buf
Definition: ffplay.c:246
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
int muted
Definition: ffplay.c:253
static int display_disable
Definition: ffplay.c:323
static int video_disable
Definition: ffplay.c:319
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3336
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:936
signed 16 bits
Definition: samplefmt.h:61
int audio_buf_index
Definition: ffplay.c:250
uint8_t * audio_buf1
Definition: ffplay.c:247
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3501
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:147
static int screen_width
Definition: ffplay.c:316
PacketQueue * pktq
Definition: ffplay.c:180
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:947
uint32_t start_display_time
Definition: avcodec.h:3958
FFTSample * rdft_data
Definition: ffplay.c:271
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1501
int audio_clock_serial
Definition: ffplay.c:238
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1297
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(constuint8_t *) pi-0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(constint16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(constint32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(constint64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64,*(constint64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(constfloat *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(constdouble *) pi *(INT64_C(1)<< 63)))#defineFMT_PAIR_FUNC(out, in) staticconv_func_type *constfmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, 
AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64),};staticvoidcpy1(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, len);}staticvoidcpy2(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 2 *len);}staticvoidcpy4(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 4 *len);}staticvoidcpy8(uint8_t **dst, constuint8_t **src, intlen){memcpy(*dst,*src, 8 *len);}AudioConvert *swri_audio_convert_alloc(enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, constint *ch_map, intflags){AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) returnNULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) returnNULL;if(channels==1){in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);}ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map){switch(av_get_bytes_per_sample(in_fmt)){case1:ctx->simd_f=cpy1;break;case2:ctx->simd_f=cpy2;break;case4:ctx->simd_f=cpy4;break;case8:ctx->simd_f=cpy8;break;}}if(HAVE_YASM &&1) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);returnctx;}voidswri_audio_convert_free(AudioConvert **ctx){av_freep(ctx);}intswri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, intlen){intch;intoff=0;constintos=(out->planar?1:out->ch_count)*out->bps;unsignedmisaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask){intplanes=in->planar?in->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;}if(ctx->out_simd_align_mask){intplanes=out->planar?out->ch_count:1;unsignedm=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;}if(ctx->simd_f &&!ctx->ch_map &&!misaligned){off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){if(out->planar==in->planar){intplanes=out->planar?out->ch_count:1;for(ch=0;ch< planes;ch++){ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:83
PacketQueue subtitleq
Definition: ffplay.c:279
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1350
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4165
static int lowres
Definition: ffplay.c:330
int eof
Definition: ffplay.c:290
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:572
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:337
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:494
double duration
Definition: ffplay.c:159
int pixels
Definition: avisynth_c.h:429
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:87
int eof_reached
true if eof reached
Definition: avio.h:222
#define NAN
Definition: math.h:28
int len
int channels
number of audio channels
Definition: avcodec.h:2439
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
unsigned int audio_buf1_size
Definition: ffplay.c:249
SDL_Thread * read_tid
Definition: ffplay.c:206
AVPacket pkt
Definition: ffplay.c:190
int frame_size
Definition: ffplay.c:139
void av_log_set_flags(int arg)
Definition: log.c:396
int64_t start_pts
Definition: ffplay.c:198
int abort_request
Definition: ffplay.c:208
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:779
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:424
double last_updated
Definition: ffplay.c:146
Decoder viddec
Definition: ffplay.c:230
#define lrint
Definition: tablegen.h:53
AVDictionary * swr_opts
Definition: cmdutils.c:71
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:503
int height
Definition: ffplay.c:293
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:199
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1778
static void update_volume(VideoState *is, int sign, int step)
Definition: ffplay.c:1446
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:522
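A sketch of the usual way this parser is wired between a pre-created buffer source and sink, roughly the shape of a configure_filtergraph() style helper; src, sink and the "in"/"out" labels here are assumptions for illustration.

    #include "libavfilter/avfilter.h"
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    static int parse_filters(AVFilterGraph *graph, const char *filtergraph,
                             AVFilterContext *src, AVFilterContext *sink)
    {
        AVFilterInOut *inputs  = avfilter_inout_alloc();
        AVFilterInOut *outputs = avfilter_inout_alloc();
        int ret;

        if (!inputs || !outputs) {
            ret = AVERROR(ENOMEM);
            goto done;
        }

        outputs->name       = av_strdup("in");   /* pad that feeds the parsed chain */
        outputs->filter_ctx = src;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;

        inputs->name        = av_strdup("out");  /* pad that consumes the parsed chain */
        inputs->filter_ctx  = sink;
        inputs->pad_idx     = 0;
        inputs->next        = NULL;

        ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL);
        if (ret >= 0)
            ret = avfilter_graph_config(graph, NULL); /* check and finalize all links */
    done:
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }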
int channels
Audio only.
Definition: avcodec.h:4086
An instance of a filter.
Definition: avfilter.h:307
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1600
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
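For illustration, a small fallback helper (hypothetical) of the kind a player needs when a decoder reports a channel count but no usable channel layout:

    #include "libavutil/channel_layout.h"

    static int64_t pick_layout(int nb_channels, int64_t reported_layout)
    {
        /* Trust the reported layout only if it matches the channel count,
         * otherwise fall back to the library default (e.g. 2 -> stereo). */
        if (reported_layout &&
            av_get_channel_layout_nb_channels(reported_layout) == nb_channels)
            return reported_layout;
        return av_get_default_channel_layout(nb_channels);
    }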
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1433
int height
Definition: frame.h:236
FILE * out
Definition: movenc.c:54
static const char * video_codec_name
Definition: ffplay.c:341
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3418
PacketQueue * queue
Definition: ffplay.c:192
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:664
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:706
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
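Passing an empty key together with this flag matches every entry, which is the usual idiom for iterating a dictionary; the helper below is a hypothetical sketch of reporting options a demuxer or decoder left unconsumed.

    #include "libavutil/dict.h"
    #include "libavutil/log.h"

    static void report_unused_options(const AVDictionary *opts)
    {
        AVDictionaryEntry *t = NULL;
        while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX)))
            av_log(NULL, AV_LOG_WARNING, "Option %s=%s was not used.\n", t->key, t->value);
    }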
static int subtitle_thread(void *arg)
Definition: ffplay.c:2214
FrameQueue subpq
Definition: ffplay.c:226
int format
Definition: ffplay.c:165
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Definition: avformat.h:1241
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1375
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:174
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:328
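A sketch of the read-loop idiom this helper supports: treat both AVERROR_EOF from av_read_frame() and end-of-file on the underlying AVIOContext as "no more packets". The helper name and eof flag are illustrative.

    #include "libavformat/avformat.h"

    static int read_one_packet(AVFormatContext *ic, AVPacket *pkt, int *eof)
    {
        int ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && avio_feof(ic->pb)))
                *eof = 1;   /* the demuxer is drained */
            return ret;
        }
        return 0;
    }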
#define FF_QUIT_EVENT
Definition: ffplay.c:359
int xleft
Definition: ffplay.c:293
#define FFSWAP(type, a, b)
Definition: common.h:99
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2182
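A tiny illustrative use; since the lookup returns NULL for unknown formats, guard the print:

    #include "libavutil/log.h"
    #include "libavutil/pixdesc.h"

    static void log_pix_fmt(enum AVPixelFormat fmt)
    {
        const char *name = av_get_pix_fmt_name(fmt);
        av_log(NULL, AV_LOG_INFO, "pixel format: %s\n", name ? name : "unknown");
    }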
int stream_index
Definition: avcodec.h:1603
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:926
int subtitle_stream
Definition: ffplay.c:277
unsigned int audio_buf_size
Definition: ffplay.c:248
int64_t seek_rel
Definition: ffplay.c:216
int realtime
Definition: ffplay.c:219
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:231
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:949
static void video_display(VideoState *is)
Definition: ffplay.c:1296
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:321
static int show_status
Definition: ffplay.c:324
static int compute_mod(int a, int b)
Definition: ffplay.c:975
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
This structure stores compressed data.
Definition: avcodec.h:1578
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:44
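A sketch of the one-time global setup a player of this FFmpeg generation performs before opening any input; which of these calls apply depends on the libraries the build actually links, and the wrapper name is illustrative.

    #include "libavdevice/avdevice.h"
    #include "libavfilter/avfilter.h"
    #include "libavformat/avformat.h"

    static void register_everything(void)
    {
        avdevice_register_all();  /* capture/playback devices */
        av_register_all();        /* demuxers, muxers, protocols */
        avfilter_register_all();  /* filters, when libavfilter is enabled */
        avformat_network_init();  /* network protocols (http, rtsp, ...) */
    }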
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:431
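A hedged sketch of setting an AVOption by name as a string; AV_OPT_SEARCH_CHILDREN also searches child objects (such as a codec's private options), and AVERROR_OPTION_NOT_FOUND, listed above, is what comes back when no option matches. The helper name is illustrative.

    #include "libavcodec/avcodec.h"
    #include "libavutil/log.h"
    #include "libavutil/opt.h"

    static void set_decoder_options(AVCodecContext *avctx)
    {
        int ret = av_opt_set(avctx, "threads", "auto", AV_OPT_SEARCH_CHILDREN);
        if (ret == AVERROR_OPTION_NOT_FOUND)
            av_log(avctx, AV_LOG_WARNING, "no 'threads' option on this context\n");
    }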
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2430
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:241
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1350
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:959
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3186
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: avcodec.h:1594
double last_vis_time
Definition: ffplay.c:273
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached picture.
Definition: avformat.h:976
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:242
#define av_unused
Definition: attributes.h:126
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:155
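A sketch of the configure-then-init pattern: parameters go in via swr_alloc_set_opts() (or av_opt_set()), and swr_init() must be called before conversion and again after any parameter change. The concrete layouts, formats and rates below are illustrative only.

    #include "libavutil/channel_layout.h"
    #include "libavutil/samplefmt.h"
    #include "libswresample/swresample.h"

    static struct SwrContext *make_resampler(void)
    {
        struct SwrContext *swr = swr_alloc_set_opts(NULL,
                AV_CH_LAYOUT_STEREO,  AV_SAMPLE_FMT_S16,  44100,  /* output */
                AV_CH_LAYOUT_5POINT1, AV_SAMPLE_FMT_FLTP, 48000,  /* input  */
                0, NULL);
        if (!swr || swr_init(swr) < 0) {
            swr_free(&swr);
            return NULL;
        }
        return swr;
    }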
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
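A sketch of the push/pull pattern around a configured graph: push a decoded frame into the buffer source, then drain the sink; src and sink are assumed to be already-configured filter contexts.

    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"
    #include "libavutil/frame.h"

    static int run_filters(AVFilterContext *src, AVFilterContext *sink,
                           AVFrame *in, AVFrame *out)
    {
        int ret = av_buffersrc_add_frame(src, in);   /* graph takes the frame's references */
        if (ret < 0)
            return ret;
        return av_buffersink_get_frame(sink, out);   /* AVERROR(EAGAIN) if nothing ready yet */
    }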
AVFormatContext * ic
Definition: ffplay.c:218
simple arithmetic expression evaluator
int audio_volume
Definition: ffplay.c:252
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:722