FFmpeg
sync_queue.c
Go to the documentation of this file.
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include <stdint.h>
20 #include <string.h>
21 
22 #include "libavutil/avassert.h"
23 #include "libavutil/channel_layout.h"
24 #include "libavutil/cpu.h"
25 #include "libavutil/error.h"
26 #include "libavutil/fifo.h"
27 #include "libavutil/mathematics.h"
28 #include "libavutil/mem.h"
29 #include "libavutil/samplefmt.h"
30 #include "libavutil/timestamp.h"
31 
32 #include "objpool.h"
33 #include "sync_queue.h"
34 
35 /*
36  * How this works:
37  * --------------
38  * time:    0    1    2    3    4    5    6    7    8    9    10   11   12   13
39  * ----------------------------------------------------------------------------
40  *          |    |    |    |    |    |    |    |    |    |    |    |    |    |
41  *          |    ┌───┐┌────────┐┌───┐┌─────────────┐
42  * stream 0 |    │d=1││  d=2   ││d=1││     d=3     │
43  *          |    └───┘└────────┘└───┘└─────────────┘
44  *          ┌───┐               ┌───────────────────────┐
45  * stream 1 │d=1│               │          d=5          │
46  *          └───┘               └───────────────────────┘
47  *          |    ┌───┐┌───┐┌───┐┌───┐
48  * stream 2 |    │d=1││d=1││d=1││d=1│  <- stream 2 is the head stream of the queue
49  *          |    └───┘└───┘└───┘└───┘
50  *                 ^              ^
51  *          [stream 2 tail] [stream 2 head]
52  *
53  * We have N streams (N=3 in the diagram), each stream is a FIFO. The *tail* of
54  * each FIFO is the frame with smallest end time, the *head* is the frame with
55  * the largest end time. Frames submitted to the queue with sq_send() are placed
56  * after the head, frames returned to the caller with sq_receive() are taken
57  * from the tail.
58  *
59  * The head stream of the whole queue (SyncQueue.head_stream) is the limiting
60  * stream with the *smallest* head timestamp, i.e. the stream whose source lags
61  * furthest behind all other streams. It determines which frames can be output
62  * from the queue.
63  *
64  * In the diagram, the head stream is 2, because its head time is t=5, while
65  * streams 0 and 1 end at t=8 and t=9 respectively. All frames that _end_ at
66  * or before t=5 can be output, i.e. the first 3 frames from stream 0, first
67  * frame from stream 1, and all 4 frames from stream 2.
68  */
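Not part of the listing: a minimal usage sketch of the API this file implements, based only on the functions documented below (sq_alloc(), sq_add_stream(), sq_send(), sq_receive(), sq_free()). The output_frame() consumer is a hypothetical placeholder and the error handling is illustrative, not a copy of what ffmpeg itself does.

    #include "libavutil/error.h"
    #include "libavutil/frame.h"
    #include "sync_queue.h"

    /* hypothetical consumer of synchronized frames */
    void output_frame(int stream_idx, AVFrame *frame);

    static int drain_queue(SyncQueue *sq)
    {
        AVFrame *out = av_frame_alloc();
        int ret;

        if (!out)
            return AVERROR(ENOMEM);

        /* stream_idx -1: accept output from any stream; on success the
         * return value is the index of the stream the frame belongs to */
        while ((ret = sq_receive(sq, -1, (SyncQueueFrame){ .f = out })) >= 0) {
            output_frame(ret, out);
            av_frame_unref(out);
        }

        av_frame_free(&out);

        /* AVERROR(EAGAIN): the queue needs more input before it can output
         * anything; AVERROR_EOF: every stream has finished */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }

Frames are fed in with sq_send(sq, idx, (SyncQueueFrame){ .f = frame }); as sq_send() below shows, sending a frame whose pointer is NULL marks EOF on that stream.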
69 
70 typedef struct SyncQueueStream {
71  AVFifo *fifo;
72  AVRational tb;
73 
74  /* number of audio samples in fifo */
75  uint64_t samples_queued;
76  /* stream head: largest timestamp seen */
77  int64_t head_ts;
78  int limiting;
79  /* no more frames will be sent for this stream */
80  int finished;
81 
82  uint64_t frames_sent;
83  uint64_t samples_sent;
84  uint64_t frames_max;
85  int frame_samples;
86 } SyncQueueStream;
87 
88 struct SyncQueue {
89  enum SyncQueueType type;
90 
91  void *logctx;
92 
93  /* no more frames will be sent for any stream */
94  int finished;
95  /* sync head: the stream with the _smallest_ head timestamp
96  * this stream determines which frames can be output */
97  int head_stream;
98  /* the finished stream with the smallest finish timestamp or -1 */
99  int head_finished_stream;
100 
101  // maximum buffering duration in microseconds
102  int64_t buf_size_us;
103 
104  SyncQueueStream *streams;
105  unsigned int nb_streams;
106 
107  // pool of preallocated frames to avoid constant allocations
108  ObjPool *pool;
109 
110  int have_limiting;
111 
112  uintptr_t align_mask;
113 };
114 
115 static void frame_move(const SyncQueue *sq, SyncQueueFrame dst,
116  SyncQueueFrame src)
117 {
118  if (sq->type == SYNC_QUEUE_PACKETS)
119  av_packet_move_ref(dst.p, src.p);
120  else
121  av_frame_move_ref(dst.f, src.f);
122 }
123 
124 /**
125  * Compute the end timestamp of a frame. If nb_samples is provided, consider
126  * the frame to have this number of audio samples, otherwise use frame duration.
127  */
128 static int64_t frame_end(const SyncQueue *sq, SyncQueueFrame frame, int nb_samples)
129 {
130  if (nb_samples) {
131  int64_t d = av_rescale_q(nb_samples, (AVRational){ 1, frame.f->sample_rate},
132  frame.f->time_base);
133  return frame.f->pts + d;
134  }
135 
136  return (sq->type == SYNC_QUEUE_PACKETS) ?
137  frame.p->pts + frame.p->duration :
138  frame.f->pts + frame.f->duration;
139 }
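To make the rescale above concrete, here is a small, self-contained example with arbitrarily chosen values: 1024 samples at 48 kHz expressed in a 1/90000 stream time base come out as 1920 ticks, which frame_end() then adds to the frame's pts.

    #include <inttypes.h>
    #include <stdio.h>
    #include "libavutil/mathematics.h"

    int main(void)
    {
        /* duration of 1024 audio samples at 48 kHz in a 1/90000 time base */
        int64_t d = av_rescale_q(1024, (AVRational){ 1, 48000 },
                                       (AVRational){ 1, 90000 });
        printf("duration = %"PRId64" ticks\n", d);   /* 1024 * 90000 / 48000 = 1920 */
        return 0;
    }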
140 
141 static int frame_samples(const SyncQueue *sq, SyncQueueFrame frame)
142 {
143  return (sq->type == SYNC_QUEUE_PACKETS) ? 0 : frame.f->nb_samples;
144 }
145 
146 static int frame_null(const SyncQueue *sq, SyncQueueFrame frame)
147 {
148  return (sq->type == SYNC_QUEUE_PACKETS) ? (frame.p == NULL) : (frame.f == NULL);
149 }
150 
151 static void tb_update(const SyncQueue *sq, SyncQueueStream *st,
152  const SyncQueueFrame frame)
153 {
154  AVRational tb = (sq->type == SYNC_QUEUE_PACKETS) ?
155  frame.p->time_base : frame.f->time_base;
156 
157  av_assert0(tb.num > 0 && tb.den > 0);
158 
159  if (tb.num == st->tb.num && tb.den == st->tb.den)
160  return;
161 
162  // timebase should not change after the first frame
163  av_assert0(st->head_ts == AV_NOPTS_VALUE);
164 
165  if (st->head_ts != AV_NOPTS_VALUE)
166  st->head_ts = av_rescale_q(st->head_ts, st->tb, tb);
167 
168  st->tb = tb;
169 }
170 
171 static void finish_stream(SyncQueue *sq, unsigned int stream_idx)
172 {
173  SyncQueueStream *st = &sq->streams[stream_idx];
174 
175  if (!st->finished)
176  av_log(sq->logctx, AV_LOG_DEBUG,
177  "sq: finish %u; head ts %s\n", stream_idx,
178  av_ts2timestr(st->head_ts, &st->tb));
179 
180  st->finished = 1;
181 
182  if (st->limiting && st->head_ts != AV_NOPTS_VALUE) {
183  /* check if this stream is the new finished head */
184  if (sq->head_finished_stream < 0 ||
185  av_compare_ts(st->head_ts, st->tb,
186  sq->streams[sq->head_finished_stream].head_ts,
187  sq->streams[sq->head_finished_stream].tb) < 0) {
188  sq->head_finished_stream = stream_idx;
189  }
190 
191  /* mark as finished all streams that should no longer receive new frames,
192  * due to them being ahead of some finished stream */
193  st = &sq->streams[sq->head_finished_stream];
194  for (unsigned int i = 0; i < sq->nb_streams; i++) {
195  SyncQueueStream *st1 = &sq->streams[i];
196  if (st != st1 && st1->head_ts != AV_NOPTS_VALUE &&
197  av_compare_ts(st->head_ts, st->tb, st1->head_ts, st1->tb) <= 0) {
198  if (!st1->finished)
199  av_log(sq->logctx, AV_LOG_DEBUG,
200  "sq: finish secondary %u; head ts %s\n", i,
201  av_ts2timestr(st1->head_ts, &st1->tb));
202 
203  st1->finished = 1;
204  }
205  }
206  }
207 
208  /* mark the whole queue as finished if all streams are finished */
209  for (unsigned int i = 0; i < sq->nb_streams; i++) {
210  if (!sq->streams[i].finished)
211  return;
212  }
213  sq->finished = 1;
214 
215  av_log(sq->logctx, AV_LOG_DEBUG, "sq: finish queue\n");
216 }
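finish_stream() above (and much of this file) relies on av_compare_ts() to order timestamps that live in different time bases. A tiny standalone illustration of its semantics, with made-up numbers:

    #include <stdio.h>
    #include "libavutil/mathematics.h"

    int main(void)
    {
        /* 100 ticks in 1/25 (4.0 s) vs 192000 ticks in 1/48000 (4.0 s) */
        int cmp = av_compare_ts(100,    (AVRational){ 1, 25 },
                                192000, (AVRational){ 1, 48000 });
        printf("cmp = %d\n", cmp);   /* 0: same instant; -1/1 mean earlier/later */
        return 0;
    }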
217 
218 static void queue_head_update(SyncQueue *sq)
219 {
221 
222  if (sq->head_stream < 0) {
223  unsigned first_limiting = UINT_MAX;
224 
225  /* wait for one timestamp in each stream before determining
226  * the queue head */
227  for (unsigned int i = 0; i < sq->nb_streams; i++) {
228  SyncQueueStream *st = &sq->streams[i];
229  if (!st->limiting)
230  continue;
231  if (st->head_ts == AV_NOPTS_VALUE)
232  return;
233  if (first_limiting == UINT_MAX)
234  first_limiting = i;
235  }
236 
237  // placeholder value, correct one will be found below
238  av_assert0(first_limiting < UINT_MAX);
239  sq->head_stream = first_limiting;
240  }
241 
242  for (unsigned int i = 0; i < sq->nb_streams; i++) {
243  SyncQueueStream *st_head = &sq->streams[sq->head_stream];
244  SyncQueueStream *st_other = &sq->streams[i];
245  if (st_other->limiting && st_other->head_ts != AV_NOPTS_VALUE &&
246  av_compare_ts(st_other->head_ts, st_other->tb,
247  st_head->head_ts, st_head->tb) < 0)
248  sq->head_stream = i;
249  }
250 }
251 
252 /* update this stream's head timestamp */
253 static void stream_update_ts(SyncQueue *sq, unsigned int stream_idx, int64_t ts)
254 {
255  SyncQueueStream *st = &sq->streams[stream_idx];
256 
257  if (ts == AV_NOPTS_VALUE ||
258  (st->head_ts != AV_NOPTS_VALUE && st->head_ts >= ts))
259  return;
260 
261  st->head_ts = ts;
262 
263  /* if this stream is now ahead of some finished stream, then
264  * this stream is also finished */
265  if (sq->head_finished_stream >= 0 &&
266  av_compare_ts(sq->streams[sq->head_finished_stream].head_ts,
267  sq->streams[sq->head_finished_stream].tb,
268  ts, st->tb) <= 0)
269  finish_stream(sq, stream_idx);
270 
271  /* update the overall head timestamp if it could have changed */
272  if (st->limiting &&
273  (sq->head_stream < 0 || sq->head_stream == stream_idx))
274  queue_head_update(sq);
275 }
276 
277 /* If the queue for the given stream (or all streams when stream_idx=-1)
278  * is overflowing, trigger a fake heartbeat on lagging streams.
279  *
280  * @return 1 if heartbeat triggered, 0 otherwise
281  */
282 static int overflow_heartbeat(SyncQueue *sq, int stream_idx)
283 {
284  SyncQueueStream *st;
285  SyncQueueFrame frame;
286  int64_t tail_ts = AV_NOPTS_VALUE;
287 
288  /* if no stream specified, pick the one that is most ahead */
289  if (stream_idx < 0) {
290  int64_t ts = AV_NOPTS_VALUE;
291 
292  for (int i = 0; i < sq->nb_streams; i++) {
293  st = &sq->streams[i];
294  if (st->head_ts != AV_NOPTS_VALUE &&
295  (ts == AV_NOPTS_VALUE ||
296  av_compare_ts(ts, sq->streams[stream_idx].tb,
297  st->head_ts, st->tb) < 0)) {
298  ts = st->head_ts;
299  stream_idx = i;
300  }
301  }
302  /* no stream has a timestamp yet -> nothing to do */
303  if (stream_idx < 0)
304  return 0;
305  }
306 
307  st = &sq->streams[stream_idx];
308 
309  /* get the chosen stream's tail timestamp */
310  for (size_t i = 0; tail_ts == AV_NOPTS_VALUE &&
311  av_fifo_peek(st->fifo, &frame, 1, i) >= 0; i++)
312  tail_ts = frame_end(sq, frame, 0);
313 
314  /* overflow triggers when the tail is more than the specified duration behind the head */
315  if (tail_ts == AV_NOPTS_VALUE || tail_ts >= st->head_ts ||
316  av_rescale_q(st->head_ts - tail_ts, st->tb, AV_TIME_BASE_Q) < sq->buf_size_us)
317  return 0;
318 
319  /* signal a fake timestamp for all streams that prevent tail_ts from being output */
320  tail_ts++;
321  for (unsigned int i = 0; i < sq->nb_streams; i++) {
322  const SyncQueueStream *st1 = &sq->streams[i];
323  int64_t ts;
324 
325  if (st == st1 || st1->finished ||
326  (st1->head_ts != AV_NOPTS_VALUE &&
327  av_compare_ts(tail_ts, st->tb, st1->head_ts, st1->tb) <= 0))
328  continue;
329 
330  ts = av_rescale_q(tail_ts, st->tb, st1->tb);
331  if (st1->head_ts != AV_NOPTS_VALUE)
332  ts = FFMAX(st1->head_ts + 1, ts);
333 
334  av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u overflow heartbeat %s -> %s\n",
335  i, av_ts2timestr(st1->head_ts, &st1->tb), av_ts2timestr(ts, &st1->tb));
336 
337  stream_update_ts(sq, i, ts);
338  }
339 
340  return 1;
341 }
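The overflow test above converts the head/tail distance of the most-ahead stream into microseconds and compares it against buf_size_us. A standalone sketch of that arithmetic, with invented numbers (a 1/90000 time base and a 10 second limit):

    #include <inttypes.h>
    #include <stdio.h>
    #include "libavutil/avutil.h"
    #include "libavutil/mathematics.h"

    int main(void)
    {
        AVRational tb          = { 1, 90000 };          /* stream time base */
        int64_t    head_ts     = 1000000, tail_ts = 100000;
        int64_t    buf_size_us = 10 * AV_TIME_BASE;     /* 10 s in microseconds */

        int64_t lag_us = av_rescale_q(head_ts - tail_ts, tb, AV_TIME_BASE_Q);
        printf("lag = %"PRId64" us -> %s\n", lag_us,
               lag_us < buf_size_us ? "within limit" : "overflow heartbeat");
        return 0;
    }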
342 
343 int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
344 {
345  SyncQueueStream *st;
346  SyncQueueFrame dst;
347  int64_t ts;
348  int ret, nb_samples;
349 
350  av_assert0(stream_idx < sq->nb_streams);
351  st = &sq->streams[stream_idx];
352 
353  if (frame_null(sq, frame)) {
354  av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u EOF\n", stream_idx);
355  finish_stream(sq, stream_idx);
356  return 0;
357  }
358  if (st->finished)
359  return AVERROR_EOF;
360 
361  tb_update(sq, st, frame);
362 
363  ret = objpool_get(sq->pool, (void**)&dst);
364  if (ret < 0)
365  return ret;
366 
367  frame_move(sq, dst, frame);
368 
369  nb_samples = frame_samples(sq, dst);
370  // make sure frame duration is consistent with sample count
371  if (nb_samples) {
372  av_assert0(dst.f->sample_rate > 0);
373  dst.f->duration = av_rescale_q(nb_samples, (AVRational){ 1, dst.f->sample_rate },
374  dst.f->time_base);
375  }
376 
377  ts = frame_end(sq, dst, 0);
378 
379  av_log(sq->logctx, AV_LOG_DEBUG, "sq: send %u ts %s\n", stream_idx,
380  av_ts2timestr(ts, &st->tb));
381 
382  ret = av_fifo_write(st->fifo, &dst, 1);
383  if (ret < 0) {
384  frame_move(sq, frame, dst);
385  objpool_release(sq->pool, (void**)&dst);
386  return ret;
387  }
388 
389  stream_update_ts(sq, stream_idx, ts);
390 
391  st->samples_queued += nb_samples;
392  st->samples_sent += nb_samples;
393 
394  if (st->frame_samples)
395  st->frames_sent = st->samples_sent / st->frame_samples;
396  else
397  st->frames_sent++;
398 
399  if (st->frames_sent >= st->frames_max) {
400  av_log(sq->logctx, AV_LOG_DEBUG, "sq: %u frames_max %"PRIu64" reached\n",
401  stream_idx, st->frames_max);
402 
403  finish_stream(sq, stream_idx);
404  }
405 
406  return 0;
407 }
408 
409 static void offset_audio(AVFrame *f, int nb_samples)
410 {
411  const int planar = av_sample_fmt_is_planar(f->format);
412  const int planes = planar ? f->ch_layout.nb_channels : 1;
413  const int bps = av_get_bytes_per_sample(f->format);
414  const int offset = nb_samples * bps * (planar ? 1 : f->ch_layout.nb_channels);
415 
416  av_assert0(bps > 0);
417  av_assert0(nb_samples < f->nb_samples);
418 
419  for (int i = 0; i < planes; i++) {
420  f->extended_data[i] += offset;
421  if (i < FF_ARRAY_ELEMS(f->data))
422  f->data[i] = f->extended_data[i];
423  }
424  f->linesize[0] -= offset;
425  f->nb_samples -= nb_samples;
426  f->duration = av_rescale_q(f->nb_samples, (AVRational){ 1, f->sample_rate },
427  f->time_base);
428  f->pts += av_rescale_q(nb_samples, (AVRational){ 1, f->sample_rate },
429  f->time_base);
430 }
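The pointer arithmetic above differs between packed and planar layouts: for packed audio only data[0] carries samples and advances by nb_samples * bytes_per_sample * channels, while for planar audio every plane pointer advances by nb_samples * bytes_per_sample. A small illustration with made-up parameters (skipping 100 samples of 16-bit stereo):

    #include <stdio.h>
    #include "libavutil/samplefmt.h"

    int main(void)
    {
        const int nb_samples = 100;
        const int channels   = 2;
        const int bps        = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);  /* 2 */

        printf("packed: data[0] += %d bytes\n", nb_samples * bps * channels); /* 400 */
        printf("planar: each plane += %d bytes\n", nb_samples * bps);         /* 200 */
        return 0;
    }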
431 
432 static int frame_is_aligned(const SyncQueue *sq, const AVFrame *frame)
433 {
434  // only checks linesize[0], so only works for audio
435  av_assert0(frame->nb_samples > 0);
436  av_assert0(sq->align_mask);
437 
438  // only check data[0], because we always offset all data pointers
439  // by the same offset, so if one is aligned, all are
440  if (!((uintptr_t)frame->data[0] & sq->align_mask) &&
441  !(frame->linesize[0] & sq->align_mask) &&
442  frame->linesize[0] > sq->align_mask)
443  return 1;
444 
445  return 0;
446 }
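The mask used above is set in sq_frame_samples() further down as align_mask = av_cpu_max_align() - 1, so the alignment check reduces to a bitwise AND. A hedged standalone example (the address value is made up):

    #include <stdint.h>
    #include <stdio.h>
    #include "libavutil/cpu.h"

    int main(void)
    {
        uintptr_t align_mask = av_cpu_max_align() - 1;   /* e.g. 63 when 64-byte alignment is required */
        uintptr_t addr       = 0x1040;                   /* hypothetical data pointer value */

        printf("max align %zu, address 0x%zx is %saligned\n",
               (size_t)(align_mask + 1), (size_t)addr,
               (addr & align_mask) ? "not " : "");
        return 0;
    }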
447 
448 static int receive_samples(SyncQueue *sq, SyncQueueStream *st,
449  AVFrame *dst, int nb_samples)
450 {
451  SyncQueueFrame src;
452  int ret;
453 
454  av_assert0(st->samples_queued >= nb_samples);
455 
456  ret = av_fifo_peek(st->fifo, &src, 1, 0);
457  av_assert0(ret >= 0);
458 
459  // peeked frame has enough samples and its data is aligned
460  // -> we can just make a reference and limit its sample count
461  if (src.f->nb_samples > nb_samples && frame_is_aligned(sq, src.f)) {
462  ret = av_frame_ref(dst, src.f);
463  if (ret < 0)
464  return ret;
465 
466  dst->nb_samples = nb_samples;
467  offset_audio(src.f, nb_samples);
468  st->samples_queued -= nb_samples;
469 
470  goto finish;
471  }
472 
473  // otherwise allocate a new frame and copy the data
474  ret = av_channel_layout_copy(&dst->ch_layout, &src.f->ch_layout);
475  if (ret < 0)
476  return ret;
477 
478  dst->format = src.f->format;
479  dst->nb_samples = nb_samples;
480 
481  ret = av_frame_get_buffer(dst, 0);
482  if (ret < 0)
483  goto fail;
484 
485  ret = av_frame_copy_props(dst, src.f);
486  if (ret < 0)
487  goto fail;
488 
489  dst->nb_samples = 0;
490  while (dst->nb_samples < nb_samples) {
491  int to_copy;
492 
493  ret = av_fifo_peek(st->fifo, &src, 1, 0);
494  av_assert0(ret >= 0);
495 
496  to_copy = FFMIN(nb_samples - dst->nb_samples, src.f->nb_samples);
497 
498  av_samples_copy(dst->extended_data, src.f->extended_data, dst->nb_samples,
499  0, to_copy, dst->ch_layout.nb_channels, dst->format);
500 
501  if (to_copy < src.f->nb_samples)
502  offset_audio(src.f, to_copy);
503  else {
504  av_frame_unref(src.f);
505  objpool_release(sq->pool, (void**)&src);
506  av_fifo_drain2(st->fifo, 1);
507  }
508  st->samples_queued -= to_copy;
509 
510  dst->nb_samples += to_copy;
511  }
512 
513 finish:
514  dst->duration = av_rescale_q(nb_samples, (AVRational){ 1, dst->sample_rate },
515  dst->time_base);
516 
517  return 0;
518 
519 fail:
520  av_frame_unref(dst);
521  return ret;
522 }
523 
524 static int receive_for_stream(SyncQueue *sq, unsigned int stream_idx,
525  SyncQueueFrame frame)
526 {
527  const SyncQueueStream *st_head = sq->head_stream >= 0 ?
528  &sq->streams[sq->head_stream] : NULL;
529  SyncQueueStream *st;
530 
531  av_assert0(stream_idx < sq->nb_streams);
532  st = &sq->streams[stream_idx];
533 
534  if (av_fifo_can_read(st->fifo) &&
535  (st->frame_samples <= st->samples_queued || st->finished)) {
536  int nb_samples = st->frame_samples;
537  SyncQueueFrame peek;
538  int64_t ts;
539  int cmp = 1;
540 
541  if (st->finished)
542  nb_samples = FFMIN(nb_samples, st->samples_queued);
543 
544  av_fifo_peek(st->fifo, &peek, 1, 0);
545  ts = frame_end(sq, peek, nb_samples);
546 
547  /* check if this stream's tail timestamp does not overtake
548  * the overall queue head */
549  if (ts != AV_NOPTS_VALUE && st_head)
550  cmp = av_compare_ts(ts, st->tb, st_head->head_ts, st_head->tb);
551 
552  /* We can release frames that do not end after the queue head.
553  * Frames with no timestamps are just passed through with no conditions.
554  * Frames are also passed through when there are no limiting streams.
555  */
556  if (cmp <= 0 || ts == AV_NOPTS_VALUE || !sq->have_limiting) {
557  if (nb_samples &&
558  (nb_samples != peek.f->nb_samples || !frame_is_aligned(sq, peek.f))) {
559  int ret = receive_samples(sq, st, frame.f, nb_samples);
560  if (ret < 0)
561  return ret;
562  } else {
563  frame_move(sq, frame, peek);
564  objpool_release(sq->pool, (void**)&peek);
565  av_fifo_drain2(st->fifo, 1);
567  st->samples_queued -= frame_samples(sq, frame);
568  }
569 
570  av_log(sq->logctx, AV_LOG_DEBUG,
571  "sq: receive %u ts %s queue head %d ts %s\n", stream_idx,
572  av_ts2timestr(frame_end(sq, frame, 0), &st->tb),
573  sq->head_stream,
574  st_head ? av_ts2timestr(st_head->head_ts, &st_head->tb) : "N/A");
575 
576  return 0;
577  }
578  }
579 
580  return (sq->finished || (st->finished && !av_fifo_can_read(st->fifo))) ?
581  AVERROR_EOF : AVERROR(EAGAIN);
582 }
583 
584 static int receive_internal(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
585 {
586  int nb_eof = 0;
587  int ret;
588 
589  /* read a frame for a specific stream */
590  if (stream_idx >= 0) {
591  ret = receive_for_stream(sq, stream_idx, frame);
592  return (ret < 0) ? ret : stream_idx;
593  }
594 
595  /* read a frame for any stream with available output */
596  for (unsigned int i = 0; i < sq->nb_streams; i++) {
597  ret = receive_for_stream(sq, i, frame);
598  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN)) {
599  nb_eof += (ret == AVERROR_EOF);
600  continue;
601  }
602  return (ret < 0) ? ret : i;
603  }
604 
605  return (nb_eof == sq->nb_streams) ? AVERROR_EOF : AVERROR(EAGAIN);
606 }
607 
608 int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
609 {
610  int ret = receive_internal(sq, stream_idx, frame);
611 
612  /* try again if the queue overflowed and triggered a fake heartbeat
613  * for lagging streams */
614  if (ret == AVERROR(EAGAIN) && overflow_heartbeat(sq, stream_idx))
615  ret = receive_internal(sq, stream_idx, frame);
616 
617  return ret;
618 }
619 
620 int sq_add_stream(SyncQueue *sq, int limiting)
621 {
622  SyncQueueStream *tmp, *st;
623 
624  tmp = av_realloc_array(sq->streams, sq->nb_streams + 1, sizeof(*sq->streams));
625  if (!tmp)
626  return AVERROR(ENOMEM);
627  sq->streams = tmp;
628 
629  st = &sq->streams[sq->nb_streams];
630  memset(st, 0, sizeof(*st));
631 
632  st->fifo = av_fifo_alloc2(1, sizeof(SyncQueueFrame), AV_FIFO_FLAG_AUTO_GROW);
633  if (!st->fifo)
634  return AVERROR(ENOMEM);
635 
636  /* we set a valid default, so that a pathological stream that never
637  * receives even a real timebase (and no frames) won't stall all other
638  * streams forever; cf. overflow_heartbeat() */
639  st->tb = (AVRational){ 1, 1 };
640  st->head_ts = AV_NOPTS_VALUE;
641  st->frames_max = UINT64_MAX;
642  st->limiting = limiting;
643 
644  sq->have_limiting |= limiting;
645 
646  return sq->nb_streams++;
647 }
648 
649 void sq_limit_frames(SyncQueue *sq, unsigned int stream_idx, uint64_t frames)
650 {
651  SyncQueueStream *st;
652 
653  av_assert0(stream_idx < sq->nb_streams);
654  st = &sq->streams[stream_idx];
655 
656  st->frames_max = frames;
657  if (st->frames_sent >= st->frames_max)
658  finish_stream(sq, stream_idx);
659 }
660 
661 void sq_frame_samples(SyncQueue *sq, unsigned int stream_idx,
662  int frame_samples)
663 {
664  SyncQueueStream *st;
665 
666  av_assert0(sq->type == SYNC_QUEUE_FRAMES);
667  av_assert0(stream_idx < sq->nb_streams);
668  st = &sq->streams[stream_idx];
669 
670  st->frame_samples = frame_samples;
671 
672  sq->align_mask = av_cpu_max_align() - 1;
673 }
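For context, not taken from this file: fixed-frame-size audio encoders such as AAC consume a constant number of samples per packet, exposed as AVCodecContext.frame_size, and a caller would typically forward that value here. A hedged fragment, where enc_ctx and audio_idx are assumed to exist:

    /* make the queue emit audio frames of exactly the encoder's frame size */
    if (enc_ctx->frame_size > 0)
        sq_frame_samples(sq, audio_idx, enc_ctx->frame_size);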
674 
675 SyncQueue *sq_alloc(enum SyncQueueType type, int64_t buf_size_us, void *logctx)
676 {
677  SyncQueue *sq = av_mallocz(sizeof(*sq));
678 
679  if (!sq)
680  return NULL;
681 
682  sq->type = type;
683  sq->buf_size_us = buf_size_us;
684  sq->logctx = logctx;
685 
686  sq->head_stream = -1;
687  sq->head_finished_stream = -1;
688 
689  sq->pool = (type == SYNC_QUEUE_PACKETS) ? objpool_alloc_packets() :
690  objpool_alloc_frames();
691  if (!sq->pool) {
692  av_freep(&sq);
693  return NULL;
694  }
695 
696  return sq;
697 }
698 
699 void sq_free(SyncQueue **psq)
700 {
701  SyncQueue *sq = *psq;
702 
703  if (!sq)
704  return;
705 
706  for (unsigned int i = 0; i < sq->nb_streams; i++) {
707  SyncQueueFrame frame;
708  while (av_fifo_read(sq->streams[i].fifo, &frame, 1) >= 0)
709  objpool_release(sq->pool, (void**)&frame);
710 
711  av_fifo_freep2(&sq->streams[i].fifo);
712  }
713 
714  av_freep(&sq->streams);
715 
716  objpool_free(&sq->pool);
717 
718  av_freep(psq);
719 }
frame_samples
static int frame_samples(const SyncQueue *sq, SyncQueueFrame frame)
Definition: sync_queue.c:141
av_samples_copy
int av_samples_copy(uint8_t *const *dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:222
SYNC_QUEUE_PACKETS
@ SYNC_QUEUE_PACKETS
Definition: sync_queue.h:29
av_fifo_drain2
void av_fifo_drain2(AVFifo *f, size_t size)
Discard the specified amount of data from an AVFifo.
Definition: fifo.c:266
SyncQueueStream
Definition: sync_queue.c:70
stream_update_ts
static void stream_update_ts(SyncQueue *sq, unsigned int stream_idx, int64_t ts)
Definition: sync_queue.c:253
av_compare_ts
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
SyncQueueFrame::f
AVFrame * f
Definition: sync_queue.h:34
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:288
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:780
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
sq_limit_frames
void sq_limit_frames(SyncQueue *sq, unsigned int stream_idx, uint64_t frames)
Limit the number of output frames for stream with index stream_idx to max_frames.
Definition: sync_queue.c:649
receive_samples
static int receive_samples(SyncQueue *sq, SyncQueueStream *st, AVFrame *dst, int nb_samples)
Definition: sync_queue.c:448
SyncQueue::streams
SyncQueueStream * streams
Definition: sync_queue.c:104
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
SyncQueueType
SyncQueueType
Definition: sync_queue.h:28
av_fifo_peek
int av_fifo_peek(const AVFifo *f, void *buf, size_t nb_elems, size_t offset)
Read data from a FIFO without modifying FIFO state.
Definition: fifo.c:255
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:374
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
sync_queue.h
SyncQueue::head_stream
int head_stream
Definition: sync_queue.c:97
overflow_heartbeat
static int overflow_heartbeat(SyncQueue *sq, int stream_idx)
Definition: sync_queue.c:282
objpool_free
void objpool_free(ObjPool **pop)
Definition: objpool.c:54
tb_update
static void tb_update(const SyncQueue *sq, SyncQueueStream *st, const SyncQueueFrame frame)
Definition: sync_queue.c:151
receive_internal
static int receive_internal(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
Definition: sync_queue.c:584
mathematics.h
FFMAX
#define FFMAX(a, b)
Definition: macros.h:47
SyncQueueStream::finished
int finished
Definition: sync_queue.c:80
peek
static uint32_t BS_FUNC() peek(BSCTX *bc, unsigned int n)
Return n bits from the buffer but do not change the buffer state.
Definition: bitstream_template.h:336
objpool_alloc_packets
ObjPool * objpool_alloc_packets(void)
Definition: objpool.c:124
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
SyncQueueFrame::p
AVPacket * p
Definition: sync_queue.h:35
SyncQueue::head_finished_stream
int head_finished_stream
Definition: sync_queue.c:99
objpool.h
fifo.h
frame_null
static int frame_null(const SyncQueue *sq, SyncQueueFrame frame)
Definition: sync_queue.c:146
finish
static void finish(void)
Definition: movenc.c:373
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
samplefmt.h
objpool_release
void objpool_release(ObjPool *op, void **obj)
Definition: objpool.c:78
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:775
sq_receive
int sq_receive(SyncQueue *sq, int stream_idx, SyncQueueFrame frame)
Read a frame from the queue.
Definition: sync_queue.c:608
SyncQueueStream::frames_sent
uint64_t frames_sent
Definition: sync_queue.c:82
AVRational::num
int num
Numerator.
Definition: rational.h:59
frame_end
static int64_t frame_end(const SyncQueue *sq, SyncQueueFrame frame, int nb_samples)
Compute the end timestamp of a frame.
Definition: sync_queue.c:128
avassert.h
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
offset_audio
static void offset_audio(AVFrame *f, int nb_samples)
Definition: sync_queue.c:409
SyncQueue::align_mask
uintptr_t align_mask
Definition: sync_queue.c:112
av_realloc_array
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:217
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
av_sample_fmt_is_planar
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:114
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
nb_streams
static int nb_streams
Definition: ffprobe.c:384
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
SyncQueueFrame
Definition: sync_queue.h:33
objpool_alloc_frames
ObjPool * objpool_alloc_frames(void)
Definition: objpool.c:128
cmp
static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, const int size, const int h, int ref_index, int src_index, me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags)
compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensa...
Definition: motion_est.c:262
SyncQueueStream::frame_samples
int frame_samples
Definition: sync_queue.c:85
sq_add_stream
int sq_add_stream(SyncQueue *sq, int limiting)
Add a new stream to the sync queue.
Definition: sync_queue.c:620
SyncQueueStream::head_ts
int64_t head_ts
Definition: sync_queue.c:77
NULL
#define NULL
Definition: coverity.c:32
av_frame_copy_props
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:709
SyncQueueStream::frames_max
uint64_t frames_max
Definition: sync_queue.c:84
SyncQueue::nb_streams
unsigned int nb_streams
Definition: sync_queue.c:105
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
frame_move
static void frame_move(const SyncQueue *sq, SyncQueueFrame dst, SyncQueueFrame src)
Definition: sync_queue.c:115
SyncQueueStream::tb
AVRational tb
Definition: sync_queue.c:72
av_fifo_can_read
size_t av_fifo_can_read(const AVFifo *f)
Definition: fifo.c:87
av_cpu_max_align
size_t av_cpu_max_align(void)
Get the maximum data alignment that may be required by FFmpeg.
Definition: cpu.c:268
SyncQueueStream::fifo
AVFifo * fifo
Definition: sync_queue.c:71
av_packet_move_ref
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: packet.c:484
SyncQueue::have_limiting
int have_limiting
Definition: sync_queue.c:110
objpool_get
int objpool_get(ObjPool *op, void **obj)
Definition: objpool.c:67
error.h
sq_frame_samples
void sq_frame_samples(SyncQueue *sq, unsigned int stream_idx, int frame_samples)
Set a constant output audio frame size, in samples.
Definition: sync_queue.c:661
f
f
Definition: af_crystalizer.c:121
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:83
AVFifo
Definition: fifo.c:35
frame_is_aligned
static int frame_is_aligned(const SyncQueue *sq, const AVFrame *frame)
Definition: sync_queue.c:432
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:384
cpu.h
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:573
bps
unsigned bps
Definition: movenc.c:1788
sq_send
int sq_send(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
Submit a frame for the stream with index stream_idx.
Definition: sync_queue.c:343
sq_free
void sq_free(SyncQueue **psq)
Definition: sync_queue.c:699
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
finish_stream
static void finish_stream(SyncQueue *sq, unsigned int stream_idx)
Definition: sync_queue.c:171
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:501
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:461
ObjPool
Definition: objpool.c:30
SyncQueueStream::samples_sent
uint64_t samples_sent
Definition: sync_queue.c:83
SyncQueue::logctx
void * logctx
Definition: sync_queue.c:91
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:454
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:256
av_get_bytes_per_sample
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:108
SyncQueue::buf_size_us
int64_t buf_size_us
Definition: sync_queue.c:102
AVFrame::extended_data
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:435
FFMIN
#define FFMIN(a, b)
Definition: macros.h:49
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:633
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:606
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:256
ret
ret
Definition: filter_design.txt:187
SyncQueueStream::limiting
int limiting
Definition: sync_queue.c:78
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
SyncQueue
A sync queue provides timestamp synchronization between multiple streams.
Definition: sync_queue.c:88
channel_layout.h
AVRational::den
int den
Denominator.
Definition: rational.h:60
SyncQueueStream::samples_queued
uint64_t samples_queued
Definition: sync_queue.c:75
SyncQueue::type
enum SyncQueueType type
Definition: sync_queue.c:89
planes
static const struct @400 planes[]
receive_for_stream
static int receive_for_stream(SyncQueue *sq, unsigned int stream_idx, SyncQueueFrame frame)
Definition: sync_queue.c:524
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:440
mem.h
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
SyncQueue::pool
ObjPool * pool
Definition: sync_queue.c:108
d
d
Definition: ffmpeg_filter.c:424
timestamp.h
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
SYNC_QUEUE_FRAMES
@ SYNC_QUEUE_FRAMES
Definition: sync_queue.h:30
sq_alloc
SyncQueue * sq_alloc(enum SyncQueueType type, int64_t buf_size_us, void *logctx)
Allocate a sync queue of the given type.
Definition: sync_queue.c:675
SyncQueue::finished
int finished
Definition: sync_queue.c:94
queue_head_update
static void queue_head_update(SyncQueue *sq)
Definition: sync_queue.c:218
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:63