FFmpeg
ffmpeg_filter.c
Go to the documentation of this file.
1 /*
2  * ffmpeg filter configuration
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 
23 #include "ffmpeg.h"
24 
25 #include "libavfilter/avfilter.h"
26 #include "libavfilter/buffersink.h"
27 #include "libavfilter/buffersrc.h"
28 
29 #include "libavutil/avassert.h"
30 #include "libavutil/avstring.h"
31 #include "libavutil/bprint.h"
33 #include "libavutil/display.h"
34 #include "libavutil/opt.h"
35 #include "libavutil/pixdesc.h"
36 #include "libavutil/pixfmt.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/samplefmt.h"
39 #include "libavutil/time.h"
40 #include "libavutil/timestamp.h"
41 
42 // FIXME private header, used for mid_pred()
43 #include "libavcodec/mathops.h"
44 
45 typedef struct FilterGraphPriv {
47 
48  // name used for logging
49  char log_name[32];
50 
51  int is_simple;
52  // true when the filtergraph contains only meta filters
53  // that do not modify the frame data
54  int is_meta;
55  // source filters are present in the graph
58 
59  unsigned nb_outputs_done;
60 
61  const char *graph_desc;
62 
63  // frame for temporarily holding output from the filtergraph
65  // frame for sending output to the encoder
67 
69  unsigned sch_idx;
71 
73 {
74  return (FilterGraphPriv*)fg;
75 }
76 
77 static const FilterGraphPriv *cfgp_from_cfg(const FilterGraph *fg)
78 {
79  return (const FilterGraphPriv*)fg;
80 }
81 
82 // data that is local to the filter thread and not visible outside of it
83 typedef struct FilterGraphThread {
85 
87 
88  // Temporary buffer for output frames, since on filtergraph reset
89  // we cannot send them to encoders immediately.
90  // The output index is stored in frame opaque.
92 
93  // index of the next input to request from the scheduler
94  unsigned next_in;
95  // set to 1 after at least one frame passed through this output
96  int got_frame;
97 
98  // EOF status of each input/output, as received by the thread
99  uint8_t *eof_in;
100  uint8_t *eof_out;
102 
103 typedef struct InputFilterPriv {
105 
107 
108  int index;
109 
111 
112  // used to hold submitted input
114 
115  /* for filters that are not yet bound to an input stream,
116  * this stores the input linklabel, if any */
117  uint8_t *linklabel;
118 
119  // filter data type
121  // source data type: AVMEDIA_TYPE_SUBTITLE for sub2video,
122  // same as type otherwise
124 
125  int eof;
126  int bound;
127 
128  // parameters configured for this input
129  int format;
130 
131  int width, height;
135 
138 
140 
142 
144 
147 
148  // fallback parameters to use when no input is ever sent
149  struct {
151 
152  int format;
153 
154  int width;
155  int height;
159 
160  int sample_rate;
162  } fallback;
163 
164  struct {
165  AVFrame *frame;
166 
167  int64_t last_pts;
168  int64_t end_pts;
169 
170  ///< marks if sub2video_update should force an initialization
171  unsigned int initialize;
172  } sub2video;
174 
176 {
177  return (InputFilterPriv*)ifilter;
178 }
179 
180 typedef struct FPSConvContext {
182  /* number of frames emitted by the video-encoding sync code */
183  int64_t frame_number;
184  /* history of nb_frames_prev, i.e. the number of times the
185  * previous frame was duplicated by vsync code in recent
186  * do_video_out() calls */
187  int64_t frames_prev_hist[3];
188 
189  uint64_t dup_warning;
190 
193 
199 
200 typedef struct OutputFilterPriv {
202 
203  int index;
204 
206 
207  /* desired output stream properties */
208  int format;
209  int width, height;
212 
213  // time base in which the output is sent to our downstream
214  // does not need to match the filtersink's timebase
216  // at least one frame with the above timebase was sent
217  // to our downstream, so it cannot change anymore
219 
221 
222  // those are only set if no format is specified and the encoder gives us multiple options
223  // They point directly to the relevant lists of the encoder.
224  const int *formats;
226  const int *sample_rates;
227 
229  // offset for output timestamps, in AV_TIME_BASE_Q
230  int64_t ts_offset;
231  int64_t next_pts;
234 
236 {
237  return (OutputFilterPriv*)ofilter;
238 }
239 
// A graph command scheduled for delivery to a filter instance; attached
// as opaque frame data and released via filter_command_free().
// NOTE(review): one field line (between 'time' and the closing brace) is
// missing from this extract.
typedef struct FilterCommand {
    char *target;   // name of the filter instance the command targets
    char *command;  // the command itself
    char *arg;      // command argument string

    double time;    // time at which the command should take effect
} FilterCommand;
248 
// AVBuffer free callback for FilterCommand payloads: frees the owned
// strings, then the structure memory itself.
// NOTE(review): the declaration of 'fc' (a FilterCommand* view of 'data')
// is missing from this extract.
static void filter_command_free(void *opaque, uint8_t *data)
{

    av_freep(&fc->target);
    av_freep(&fc->command);
    av_freep(&fc->arg);

    av_free(data);
}
259 
261 {
262  AVFrame *frame = ifp->sub2video.frame;
263  int ret;
264 
266 
267  frame->width = ifp->width;
268  frame->height = ifp->height;
269  frame->format = ifp->format;
270  frame->colorspace = ifp->color_space;
271  frame->color_range = ifp->color_range;
272 
274  if (ret < 0)
275  return ret;
276 
277  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
278 
279  return 0;
280 }
281 
282 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
283  AVSubtitleRect *r)
284 {
285  uint32_t *pal, *dst2;
286  uint8_t *src, *src2;
287  int x, y;
288 
289  if (r->type != SUBTITLE_BITMAP) {
290  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
291  return;
292  }
293  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
294  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
295  r->x, r->y, r->w, r->h, w, h
296  );
297  return;
298  }
299 
300  dst += r->y * dst_linesize + r->x * 4;
301  src = r->data[0];
302  pal = (uint32_t *)r->data[1];
303  for (y = 0; y < r->h; y++) {
304  dst2 = (uint32_t *)dst;
305  src2 = src;
306  for (x = 0; x < r->w; x++)
307  *(dst2++) = pal[*(src2++)];
308  dst += dst_linesize;
309  src += r->linesize[0];
310  }
311 }
312 
/* Send the current sub2video canvas frame downstream with the given pts,
 * recording that pts as the last one pushed. Errors other than EOF are
 * logged but not propagated (sub2video is best-effort). */
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
{
    AVFrame *frame = ifp->sub2video.frame;
    int ret;

    av_assert1(frame->data[0]);
    ifp->sub2video.last_pts = frame->pts = pts;
    /* NOTE(review): the av_buffersrc_add_frame*() call that sets 'ret'
     * is missing from this extract — confirm against upstream. */
    if (ret != AVERROR_EOF && ret < 0)
        av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
               av_err2str(ret));
}
327 
/* Render subtitle 'sub' onto the sub2video canvas and push it downstream.
 * When sub is NULL this is a heartbeat: an empty canvas is shown starting
 * at heartbeat_pts (on first call) or at the previous subtitle's end time. */
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts,
                             const AVSubtitle *sub)
{
    AVFrame *frame = ifp->sub2video.frame;
    int8_t *dst;  /* NOTE(review): sub2video_copy_rect() takes uint8_t*;
                   * int8_t here looks like a sign typo — confirm. */
    int dst_linesize;
    int num_rects;
    int64_t pts, end_pts;

    if (sub) {
        /* display times are in ms relative to sub->pts (AV_TIME_BASE_Q) */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ifp->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ifp->sub2video.initialize ?
                    heartbeat_pts : ifp->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ifp) < 0) {
        /* NOTE(review): the av_log(...) opener for this message is
         * missing from this extract. */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (int i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ifp, pts);
    ifp->sub2video.end_pts = end_pts;
    ifp->sub2video.initialize = 0;
}
366 
367 /* *dst may return be set to NULL (no pixel format found), a static string or a
368  * string backed by the bprint. Nothing has been written to the AVBPrint in case
369  * NULL is returned. The AVBPrint provided should be clean. */
/* Select the pixel-format constraint string for an output filter.
 * *dst ends up NULL (no constraint), a static string, or a string backed
 * by 'bprint' listing the encoder-supported formats '|'-separated.
 * NOTE(review): the continuation of the ternary in the keep_pix_fmt
 * branch (the av_get_pix_fmt_name(ofp->format) line) is missing from
 * this extract. */
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint,
                           const char **dst)
{
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    OutputStream *ost = ofilter->ost;

    *dst = NULL;

    if (ost->keep_pix_fmt || ofp->format != AV_PIX_FMT_NONE) {
        *dst = ofp->format == AV_PIX_FMT_NONE ? NULL :
    } else if (ofp->formats) {
        const enum AVPixelFormat *p = ofp->formats;

        /* join all supported formats with '|', terminating the list */
        for (; *p != AV_PIX_FMT_NONE; p++) {
            const char *name = av_get_pix_fmt_name(*p);
            av_bprintf(bprint, "%s%c", name, p[1] == AV_PIX_FMT_NONE ? '\0' : '|');
        }
        if (!av_bprint_is_complete(bprint))
            return AVERROR(ENOMEM);

        *dst = bprint->str;
    }

    return 0;
}
396 
397 /* Define a function for appending a list of allowed formats
398  * to an AVBPrint. If nonempty, the list will have a header. */
/* Expands to a choose_<name>() helper appending "name=value:" to bprint.
 * If ofp->var is set (!= none) that single value is printed; otherwise the
 * encoder-provided 'supported_list' is printed '|'-separated. Emits nothing
 * when neither a value nor a list is available. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilterPriv *ofp, AVBPrint *bprint)       \
{                                                                           \
    if (ofp->var == none && !ofp->supported_list)                           \
        return;                                                             \
    av_bprintf(bprint, #name "=");                                          \
    if (ofp->var != none) {                                                 \
        av_bprintf(bprint, printf_format, get_name(ofp->var));              \
    } else {                                                                \
        const type *p;                                                      \
                                                                            \
        for (p = ofp->supported_list; *p != none; p++) {                    \
            av_bprintf(bprint, printf_format "|", get_name(*p));            \
        }                                                                   \
        if (bprint->len > 0)                                                \
            bprint->str[--bprint->len] = '\0';                              \
    }                                                                       \
    av_bprint_chars(bprint, ':', 1);                                        \
}
418 
419 //DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
420 // GET_PIX_FMT_NAME)
421 
424 
426  "%d", )
427 
/* Append a "channel_layouts=..." constraint to bprint: either the single
 * configured layout, or the encoder's supported list '|'-separated.
 * Emits nothing when neither is available.
 * NOTE(review): the av_channel_layout_describe_bprint() call inside the
 * list loop is missing from this extract. */
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
{
    if (av_channel_layout_check(&ofp->ch_layout)) {
        av_bprintf(bprint, "channel_layouts=");
        av_channel_layout_describe_bprint(&ofp->ch_layout, bprint);
    } else if (ofp->ch_layouts) {
        const AVChannelLayout *p;

        av_bprintf(bprint, "channel_layouts=");
        for (p = ofp->ch_layouts; p->nb_channels; p++) {
            av_bprintf(bprint, "|");
        }
        if (bprint->len > 0)
            bprint->str[--bprint->len] = '\0';  // drop trailing separator
    } else
        return;
    av_bprint_chars(bprint, ':', 1);
}
447 
448 static int read_binary(const char *path, uint8_t **data, int *len)
449 {
450  AVIOContext *io = NULL;
451  int64_t fsize;
452  int ret;
453 
454  *data = NULL;
455  *len = 0;
456 
457  ret = avio_open2(&io, path, AVIO_FLAG_READ, &int_cb, NULL);
458  if (ret < 0) {
459  av_log(NULL, AV_LOG_ERROR, "Cannot open file '%s': %s\n",
460  path, av_err2str(ret));
461  return ret;
462  }
463 
464  fsize = avio_size(io);
465  if (fsize < 0 || fsize > INT_MAX) {
466  av_log(NULL, AV_LOG_ERROR, "Cannot obtain size of file %s\n", path);
467  ret = AVERROR(EIO);
468  goto fail;
469  }
470 
471  *data = av_malloc(fsize);
472  if (!*data) {
473  ret = AVERROR(ENOMEM);
474  goto fail;
475  }
476 
477  ret = avio_read(io, *data, fsize);
478  if (ret != fsize) {
479  av_log(NULL, AV_LOG_ERROR, "Error reading file %s\n", path);
480  ret = ret < 0 ? ret : AVERROR(EIO);
481  goto fail;
482  }
483 
484  *len = fsize;
485 
486  ret = 0;
487 fail:
488  avio_close(io);
489  if (ret < 0) {
490  av_freep(data);
491  *len = 0;
492  }
493  return ret;
494 }
495 
/* Apply a single key=val option to filter 'f'. A key starting with '/'
 * names a real option whose value should be loaded from the file 'val'
 * (raw bytes for AV_OPT_TYPE_BINARY options, text otherwise).
 * NOTE(review): several lines are missing from this extract — the initial
 * av_opt_set() that sets 'ret', the av_opt_find() that sets 'o', the
 * av_opt_set*() calls after loading, and the av_log() openers in the two
 * error paths. Compare with upstream before relying on this text. */
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
{
    const AVOption *o = NULL;
    int ret;

    if (ret >= 0)
        return 0;

    if (ret == AVERROR_OPTION_NOT_FOUND && key[0] == '/')
    if (!o)
        goto err_apply;

    // key is a valid option name prefixed with '/'
    // interpret value as a path from which to load the actual option value
    key++;

    if (o->type == AV_OPT_TYPE_BINARY) {
        uint8_t *data;
        int len;

        ret = read_binary(val, &data, &len);
        if (ret < 0)
            goto err_load;

        av_freep(&data);
    } else {
        char *data = file_read(val);
        if (!data) {
            ret = AVERROR(EIO);
            goto err_load;
        }

        av_freep(&data);
    }
    if (ret < 0)
        goto err_apply;

    return 0;

err_apply:
    "Error applying option '%s' to filter '%s': %s\n",
    key, f->filter->name, av_err2str(ret));
    return ret;
err_load:
    "Error loading value for option '%s' from file '%s'\n",
    key, val);
    return ret;
}
550 
/* Walk every filter in every chain of the parsed segment, apply its
 * pending option dictionary via filter_opt_apply(), then free the dict.
 * Returns 0 or the first error from filter_opt_apply().
 * NOTE(review): the signature line is missing from this extract;
 * presumably it takes the AVFilterGraphSegment 'seg' used below — confirm. */
{
    for (size_t i = 0; i < seg->nb_chains; i++) {
        AVFilterChain *ch = seg->chains[i];

        for (size_t j = 0; j < ch->nb_filters; j++) {
            AVFilterParams *p = ch->filters[j];
            const AVDictionaryEntry *e = NULL;

            av_assert0(p->filter);

            while ((e = av_dict_iterate(p->opts, e))) {
                int ret = filter_opt_apply(p->filter, e->key, e->value);
                if (ret < 0)
                    return ret;
            }

            av_dict_free(&p->opts);
        }
    }

    return 0;
}
574 
/* Parse the textual graph description 'desc' into 'graph', returning the
 * unlinked inputs/outputs, attaching 'hw_device' to every hw-capable
 * filter, and applying per-filter options.
 * NOTE(review): several lines are missing from this extract — the
 * inputs/outputs parameters in the signature, the 'seg' declaration, the
 * segment create/apply calls, and the segment free on the fail path. */
static int graph_parse(AVFilterGraph *graph, const char *desc,
                       AVBufferRef *hw_device)
{
    int ret;

    *inputs  = NULL;
    *outputs = NULL;

    ret = avfilter_graph_segment_parse(graph, desc, 0, &seg);
    if (ret < 0)
        return ret;

    if (ret < 0)
        goto fail;

    if (hw_device) {
        /* hand a device reference to every filter that can use one */
        for (int i = 0; i < graph->nb_filters; i++) {
            AVFilterContext *f = graph->filters[i];

            if (!(f->filter->flags & AVFILTER_FLAG_HWDEVICE))
                continue;
            f->hw_device_ctx = av_buffer_ref(hw_device);
            if (!f->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = graph_opts_apply(seg);
    if (ret < 0)
        goto fail;


fail:
    return ret;
}
617 
618 // Filters can be configured only if the formats of all inputs are known.
/* Returns 1 when every filtergraph input has a known (non-negative)
 * format, 0 otherwise — filters can only be configured in that case.
 * NOTE(review): the signature line and the per-iteration InputFilterPriv
 * 'ifp' initialization are missing from this extract. */
{
    for (int i = 0; i < fg->nb_inputs; i++) {
        if (ifp->format < 0)
            return 0;
    }
    return 1;
}
628 
629 static int filter_thread(void *arg);
630 
631 static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
632 {
633  AVFilterContext *ctx = inout->filter_ctx;
634  AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
635  int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
636 
637  if (nb_pads > 1)
638  return av_strdup(ctx->filter->name);
639  return av_asprintf("%s:%s", ctx->filter->name,
640  avfilter_pad_get_name(pads, inout->pad_idx));
641 }
642 
/* Allocate a new output filter slot on 'fg' and return its public view.
 * Returns NULL on allocation failure.
 * NOTE(review): the signature line is missing from this extract;
 * call sites use ofilter_alloc(fg) — confirm the exact prototype. */
{
    OutputFilterPriv *ofp;
    OutputFilter *ofilter;

    ofp = allocate_array_elem(&fg->outputs, sizeof(*ofp), &fg->nb_outputs);
    if (!ofp)
        return NULL;

    ofilter = &ofp->ofilter;
    ofilter->graph = fg;
    ofp->format = -1;                 // no format negotiated yet
    ofp->index = fg->nb_outputs - 1;  // position within fg->outputs

    return ofilter;
}
659 
/* Bind a filtergraph input to a demuxer input stream: registers the
 * filter with the stream's decoder, connects decoder->filter in the
 * scheduler, and sets up the sub2video canvas for subtitle inputs.
 * NOTE(review): two lines are missing from this extract — the second
 * half of the type-mismatch condition and the av_get_media_type_string()
 * arguments of its log message. */
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
{
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    FilterGraphPriv *fgp = fgp_from_fg(ifilter->graph);
    int ret, dec_idx;

    // an input may be bound at most once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != ist->par->codec_type &&
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s stream to %s filtergraph input\n",
        return AVERROR(EINVAL);
    }

    ifp->type_src = ist->st->codecpar->codec_type;

    ifp->opts.fallback = av_frame_alloc();
    if (!ifp->opts.fallback)
        return AVERROR(ENOMEM);

    dec_idx = ist_filter_add(ist, ifilter, filtergraph_is_simple(ifilter->graph),
                             &ifp->opts);
    if (dec_idx < 0)
        return dec_idx;

    // wire the decoder output to this filtergraph input in the scheduler
    ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
                      SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
        ifp->sub2video.frame = av_frame_alloc();
        if (!ifp->sub2video.frame)
            return AVERROR(ENOMEM);

        ifp->width  = ifp->opts.sub2video_width;
        ifp->height = ifp->opts.sub2video_height;

        /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
           palettes for all rectangles are identical or compatible */
        ifp->format = AV_PIX_FMT_RGB32;

        ifp->time_base = AV_TIME_BASE_Q;

        av_log(fgp, AV_LOG_VERBOSE, "sub2video: using %dx%d canvas\n",
               ifp->width, ifp->height);
    }

    return 0;
}
712 
/* Bind a filtergraph input to a standalone decoder 'dec' and connect
 * them in the scheduler.
 * NOTE(review): the signature line, the 'fgp' declaration and the
 * av_get_media_type_string() arguments of the error message are missing
 * from this extract. */
{
    int ret, dec_idx;

    // an input may be bound at most once
    av_assert0(!ifp->bound);
    ifp->bound = 1;

    if (ifp->type != dec->type) {
        av_log(fgp, AV_LOG_ERROR, "Tried to connect %s decoder to %s filtergraph input\n",
        return AVERROR(EINVAL);
    }

    ifp->type_src = ifp->type;

    dec_idx = dec_filter_add(dec, &ifp->ifilter, &ifp->opts);
    if (dec_idx < 0)
        return dec_idx;

    ret = sch_connect(fgp->sch, SCH_DEC(dec_idx),
                      SCH_FILTER_IN(fgp->sch_idx, ifp->index));
    if (ret < 0)
        return ret;

    return 0;
}
740 
/* Choose the channel layout for an output filter from the encoder
 * configuration: pass explicit (non-UNSPEC) layouts through; otherwise
 * pick the first encoder-supported layout with the requested channel
 * count, falling back to the native default layout for that count.
 * NOTE(review): the signature line is missing from this extract;
 * call sites use set_channel_layout(ofp, ost) — confirm the prototype. */
{
    const AVCodec *c = ost->enc_ctx->codec;
    int i, err;

    if (ost->enc_ctx->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC) {
        /* Pass the layout through for all orders but UNSPEC */
        err = av_channel_layout_copy(&f->ch_layout, &ost->enc_ctx->ch_layout);
        if (err < 0)
            return err;
        return 0;
    }

    /* Requested layout is of order UNSPEC */
    if (!c->ch_layouts) {
        /* Use the default native layout for the requested amount of channels when the
           encoder doesn't have a list of supported layouts */
        av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);
        return 0;
    }
    /* Encoder has a list of supported layouts. Pick the first layout in it with the
       same amount of channels as the requested layout */
    for (i = 0; c->ch_layouts[i].nb_channels; i++) {
        if (c->ch_layouts[i].nb_channels == ost->enc_ctx->ch_layout.nb_channels)
            break;
    }
    if (c->ch_layouts[i].nb_channels) {
        /* Use it if one is found */
        err = av_channel_layout_copy(&f->ch_layout, &c->ch_layouts[i]);
        if (err < 0)
            return err;
        return 0;
    }
    /* If no layout for the amount of channels requested was found, use the default
       native layout for it. */
    av_channel_layout_default(&f->ch_layout, ost->enc_ctx->ch_layout.nb_channels);

    return 0;
}
780 
/* Bind a filtergraph output to output stream 'ost': copies the encoder's
 * desired video/audio parameters (or its supported-value lists when
 * unset) into the output filter and connects filter->encoder in the
 * scheduler.
 * NOTE(review): the first signature line and the mjpeg_formats
 * initializer list contents are missing from this extract. */
                            unsigned sched_idx_enc)
{
    const OutputFile  *of = ost->file;
    OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
    FilterGraph  *fg = ofilter->graph;
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    const AVCodec *c = ost->enc_ctx->codec;
    int ret;

    // an output may be bound at most once
    av_assert0(!ofilter->ost);

    ofilter->ost = ost;
    av_freep(&ofilter->linklabel);

    ofp->ts_offset     = of->start_time == AV_NOPTS_VALUE ? 0 : of->start_time;
    ofp->enc_timebase = ost->enc_timebase;

    switch (ost->enc_ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ofp->width      = ost->enc_ctx->width;
        ofp->height     = ost->enc_ctx->height;
        if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
            ofp->format = ost->enc_ctx->pix_fmt;
        } else {
            ofp->formats = c->pix_fmts;

            // MJPEG encoder exports a full list of supported pixel formats,
            // but the full-range ones are experimental-only.
            // Restrict the auto-conversion list unless -strict experimental
            // has been specified.
            if (!strcmp(c->name, "mjpeg")) {
                // FIXME: YUV420P etc. are actually supported with full color range,
                // yet the latter information isn't available here.
                static const enum AVPixelFormat mjpeg_formats[] =
                      AV_PIX_FMT_NONE };

                const AVDictionaryEntry *strict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
                int strict_val = ost->enc_ctx->strict_std_compliance;

                if (strict) {
                    const AVOption *o = av_opt_find(ost->enc_ctx, strict->key, NULL, 0, 0);
                    av_assert0(o);
                    av_opt_eval_int(ost->enc_ctx, o, strict->value, &strict_val);
                }

                if (strict_val > FF_COMPLIANCE_UNOFFICIAL)
                    ofp->formats = mjpeg_formats;
            }
        }

        fgp->disable_conversions |= ost->keep_pix_fmt;

        ofp->fps.last_frame = av_frame_alloc();
        if (!ofp->fps.last_frame)
            return AVERROR(ENOMEM);

        ofp->fps.framerate           = ost->frame_rate;
        ofp->fps.framerate_max       = ost->max_frame_rate;
        ofp->fps.framerate_supported = ost->force_fps ?
                                       NULL : c->supported_framerates;

        // reduce frame rate for mpeg4 to be within the spec limits
        if (c->id == AV_CODEC_ID_MPEG4)
            ofp->fps.framerate_clip = 65535;

        ofp->fps.dup_warning         = 1000;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (ost->enc_ctx->sample_fmt != AV_SAMPLE_FMT_NONE) {
            ofp->format = ost->enc_ctx->sample_fmt;
        } else {
            ofp->formats = c->sample_fmts;
        }
        if (ost->enc_ctx->sample_rate) {
            ofp->sample_rate = ost->enc_ctx->sample_rate;
        } else {
            ofp->sample_rates = c->supported_samplerates;
        }
        if (ost->enc_ctx->ch_layout.nb_channels) {
            int ret = set_channel_layout(ofp, ost);
            if (ret < 0)
                return ret;
        } else if (c->ch_layouts) {
            ofp->ch_layouts = c->ch_layouts;
        }
        break;
    }

    // wire this filtergraph output to the encoder in the scheduler
    ret = sch_connect(fgp->sch, SCH_FILTER_OUT(fgp->sch_idx, ofp->index),
                      SCH_ENC(sched_idx_enc));
    if (ret < 0)
        return ret;

    return 0;
}
879 
/* Allocate a new input filter slot on 'fg', with a scratch frame and a
 * submit-queue; returns its public view, or NULL on allocation failure.
 * NOTE(review): the signature line, some field initializers and the
 * frame_queue allocation call are missing from this extract. */
{
    InputFilterPriv *ifp;
    InputFilter *ifilter;

    ifp = allocate_array_elem(&fg->inputs, sizeof(*ifp), &fg->nb_inputs);
    if (!ifp)
        return NULL;

    ifilter = &ifp->ifilter;
    ifilter->graph = fg;

    ifp->frame = av_frame_alloc();
    if (!ifp->frame)
        return NULL;

    ifp->index = fg->nb_inputs - 1;  // position within fg->inputs
    ifp->format = -1;                // format not known yet

    if (!ifp->frame_queue)
        return NULL;

    return ifilter;
}
907 
/* Free a FilterGraph and everything it owns (inputs, outputs, queued
 * frames, names, the graph description) and set *pfg to NULL.
 * NOTE(review): several free/cleanup lines are missing from this extract
 * (draining frames from the fifo, fifo/output frees, sub2video frame). */
void fg_free(FilterGraph **pfg)
{
    FilterGraph *fg = *pfg;
    FilterGraphPriv *fgp;

    if (!fg)
        return;
    fgp = fgp_from_fg(fg);

    for (int j = 0; j < fg->nb_inputs; j++) {
        InputFilter *ifilter = fg->inputs[j];
        InputFilterPriv *ifp = ifp_from_ifilter(ifilter);

        if (ifp->frame_queue) {
            AVFrame *frame;
            // drain and free any frames still queued for this input
            while (av_fifo_read(ifp->frame_queue, &frame, 1) >= 0)
        }

        av_frame_free(&ifp->frame);
        av_frame_free(&ifp->opts.fallback);

        av_freep(&ifp->linklabel);
        av_freep(&ifp->opts.name);
        av_freep(&ifilter->name);
        av_freep(&fg->inputs[j]);
    }
    av_freep(&fg->inputs);
    for (int j = 0; j < fg->nb_outputs; j++) {
        OutputFilter *ofilter = fg->outputs[j];
        OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);


        av_freep(&ofilter->linklabel);
        av_freep(&ofilter->name);
        av_freep(&fg->outputs[j]);
    }
    av_freep(&fg->outputs);
    av_freep(&fgp->graph_desc);

    av_frame_free(&fgp->frame);
    av_frame_free(&fgp->frame_enc);

    av_freep(pfg);
}
958 
959 static const char *fg_item_name(void *obj)
960 {
961  const FilterGraphPriv *fgp = obj;
962 
963  return fgp->log_name;
964 }
965 
/* Logging class for FilterGraph(Priv); fg_item_name() supplies the
 * per-instance name shown in log output. */
static const AVClass fg_class = {
    .class_name = "FilterGraph",
    .version    = LIBAVUTIL_VERSION_INT,
    .item_name  = fg_item_name,
    .category   = AV_CLASS_CATEGORY_FILTER,
};
972 
/* Create a FilterGraph from the textual description 'graph_desc'
 * (ownership of which is taken), discover its inputs/outputs by parsing
 * into a throwaway AVFilterGraph, and register it with the scheduler.
 * NOTE(review): the inputs/outputs declarations and the
 * avfilter_inout_free() cleanup lines are missing from this extract. */
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
{
    FilterGraphPriv *fgp;
    FilterGraph      *fg;

    AVFilterGraph *graph;
    int ret = 0;

    fgp = allocate_array_elem(&filtergraphs, sizeof(*fgp), &nb_filtergraphs);
    if (!fgp)
        return AVERROR(ENOMEM);
    fg = &fgp->fg;

    if (pfg)
        *pfg = fg;

    fg->class       = &fg_class;
    fg->index      = nb_filtergraphs - 1;
    fgp->graph_desc = graph_desc;
    fgp->sch          = sch;

    snprintf(fgp->log_name, sizeof(fgp->log_name), "fc#%d", fg->index);

    fgp->frame = av_frame_alloc();
    fgp->frame_enc = av_frame_alloc();
    if (!fgp->frame || !fgp->frame_enc)
        return AVERROR(ENOMEM);

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);;  // NOTE(review): stray double semicolon
    graph->nb_threads = 1;

    ret = graph_parse(graph, fgp->graph_desc, &inputs, &outputs, NULL);
    if (ret < 0)
        goto fail;

    // a filter with no input pads and no dynamic inputs is a source
    for (unsigned i = 0; i < graph->nb_filters; i++) {
        const AVFilter *f = graph->filters[i]->filter;
        if (!avfilter_filter_pad_count(f, 0) &&
            !(f->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)) {
            fgp->have_sources = 1;
            break;
        }
    }

    for (AVFilterInOut *cur = inputs; cur; cur = cur->next) {
        InputFilter *const ifilter = ifilter_alloc(fg);
        InputFilterPriv *ifp;

        if (!ifilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ifp            = ifp_from_ifilter(ifilter);
        ifp->linklabel = cur->name;
        cur->name      = NULL;

        ifp->type      = avfilter_pad_get_type(cur->filter_ctx->input_pads,
                                               cur->pad_idx);

        if (ifp->type != AVMEDIA_TYPE_VIDEO && ifp->type != AVMEDIA_TYPE_AUDIO) {
            av_log(fg, AV_LOG_FATAL, "Only video and audio filters supported "
                   "currently.\n");
            ret = AVERROR(ENOSYS);
            goto fail;
        }

        ifilter->name  = describe_filter_link(fg, cur, 1);
        if (!ifilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    for (AVFilterInOut *cur = outputs; cur; cur = cur->next) {
        OutputFilter *const ofilter = ofilter_alloc(fg);

        if (!ofilter) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ofilter->linklabel = cur->name;
        cur->name          = NULL;

        ofilter->type      = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                   cur->pad_idx);
        ofilter->name      = describe_filter_link(fg, cur, 0);
        if (!ofilter->name) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (!fg->nb_outputs) {
        av_log(fg, AV_LOG_FATAL, "A filtergraph has zero outputs, this is not supported\n");
        ret = AVERROR(ENOSYS);
        goto fail;
    }

    ret = sch_add_filtergraph(sch, fg->nb_inputs, fg->nb_outputs,
                              filter_thread, fgp);
    if (ret < 0)
        goto fail;
    fgp->sch_idx = ret;

fail:
    avfilter_graph_free(&graph);

    if (ret < 0)
        return ret;

    return 0;
}
1095 
/* Create a simple (exactly one input, one output) filtergraph, bind its
 * input to 'ist' and its output to 'ost', and connect it to the encoder.
 * NOTE(review): the first line of the signature is missing from this
 * extract; the visible tail suggests it also takes the InputStream 'ist'
 * and OutputStream 'ost' used below — confirm. */
                            char *graph_desc,
                            Scheduler *sch, unsigned sched_idx_enc)
{
    FilterGraph *fg;
    FilterGraphPriv *fgp;
    int ret;

    ret = fg_create(&fg, graph_desc, sch);
    if (ret < 0)
        return ret;
    fgp = fgp_from_fg(fg);

    fgp->is_simple = 1;

    // log name: "<media type letter>f#<file>:<stream>"
    snprintf(fgp->log_name, sizeof(fgp->log_name), "%cf#%d:%d",
             av_get_media_type_string(ost->type)[0],
             ost->file->index, ost->index);

    if (fg->nb_inputs != 1 || fg->nb_outputs != 1) {
        av_log(fg, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output. "
               "However, it had %d input(s) and %d output(s). Please adjust, "
               "or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, fg->nb_inputs, fg->nb_outputs);
        return AVERROR(EINVAL);
    }

    ost->filter = fg->outputs[0];

    ret = ifilter_bind_ist(fg->inputs[0], ist);
    if (ret < 0)
        return ret;

    ret = ofilter_bind_ost(fg->outputs[0], ost, sched_idx_enc);
    if (ret < 0)
        return ret;

    return 0;
}
1136 
/* Bind one complex-filtergraph input, resolving its link label:
 * "dec:N" binds to standalone decoder N; "<file>[:spec]" binds to a
 * matching demuxer stream; no label binds to the first unused stream of
 * the right media type.
 * NOTE(review): the signature line is missing from this extract;
 * call sites use fg_complex_bind_input(fg, ifilter) — confirm. */
{
    FilterGraphPriv *fgp = fgp_from_fg(fg);
    InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
    InputStream *ist = NULL;
    enum AVMediaType type = ifp->type;
    int i, ret;

    if (ifp->linklabel && !strncmp(ifp->linklabel, "dec:", 4)) {
        // bind to a standalone decoder
        int dec_idx;

        dec_idx = strtol(ifp->linklabel + 4, NULL, 0);
        if (dec_idx < 0 || dec_idx >= nb_decoders) {
            av_log(fg, AV_LOG_ERROR, "Invalid decoder index %d in filtergraph description %s\n",
                   dec_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }

        ret = ifilter_bind_dec(ifp, decoders[dec_idx]);
        if (ret < 0)
            av_log(fg, AV_LOG_ERROR, "Error binding a decoder to filtergraph input %s\n",
                   ifilter->name);
        return ret;
    } else if (ifp->linklabel) {
        // bind to an explicitly specified demuxer stream
        AVFormatContext *s;
        AVStream       *st = NULL;
        char *p;
        int file_idx = strtol(ifp->linklabel, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(fg, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            // subtitle streams may feed video inputs via sub2video
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(fg, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fgp->graph_desc);
            return AVERROR(EINVAL);
        }
        ist = input_files[file_idx]->streams[st->index];

        av_log(fg, AV_LOG_VERBOSE,
               "Binding input with label '%s' to input stream %d:%d\n",
               ifp->linklabel, ist->file->index, ist->index);
    } else {
        ist = ist_find_unused(type);
        if (!ist) {
            av_log(fg, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %s\n", ifilter->name);
            return AVERROR(EINVAL);
        }

        av_log(fg, AV_LOG_VERBOSE,
               "Binding unlabeled input %d to input stream %d:%d\n",
               ifp->index, ist->file->index, ist->index);
    }
    av_assert0(ist);

    ret = ifilter_bind_ist(ifilter, ist);
    if (ret < 0) {
        av_log(fg, AV_LOG_ERROR,
               "Error binding an input stream to complex filtergraph input %s.\n",
               ifilter->name);
        return ret;
    }

    return 0;
}
1220 
/* Bind every not-yet-bound filtergraph input to a stream or decoder and
 * verify that every output is connected to an output stream.
 * NOTE(review): the signature line, the per-iteration 'ifp' init and the
 * av_log() opener for the unconnected-output message are missing from
 * this extract. */
{
    // bind filtergraph inputs to input streams
    for (int i = 0; i < fg->nb_inputs; i++) {
        int ret;

        if (ifp->bound)
            continue;

        ret = fg_complex_bind_input(fg, &ifp->ifilter);
        if (ret < 0)
            return ret;
    }

    for (int i = 0; i < fg->nb_outputs; i++) {
        OutputFilter *output = fg->outputs[i];
        if (!output->ost) {
            "Filter %s has an unconnected output\n", output->name);
            return AVERROR(EINVAL);
        }
    }
    return 0;
}
1246 
/* Append a (a)trim filter after *last_filter to enforce the output's
 * start time and duration; no-op when neither limit is set. On success
 * *last_filter/*pad_idx are updated to point at the trim output.
 * NOTE(review): the 'ctx' declaration, the AV_OPT_SEARCH_CHILDREN
 * argument lines of the av_opt_set_int() calls and the
 * avfilter_init_str() call are missing from this extract. */
static int insert_trim(int64_t start_time, int64_t duration,
                       AVFilterContext **last_filter, int *pad_idx,
                       const char *filter_name)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    const AVFilter *trim;
    enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
    const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
    int ret = 0;

    if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
        return 0;

    trim = avfilter_get_by_name(name);
    if (!trim) {
        av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
               "recording time.\n", name);
        return AVERROR_FILTER_NOT_FOUND;
    }

    ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
    if (!ctx)
        return AVERROR(ENOMEM);

    if (duration != INT64_MAX) {
        ret = av_opt_set_int(ctx, "durationi", duration,
    }
    if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
        ret = av_opt_set_int(ctx, "starti", start_time,
    }
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
        return ret;
    }

    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
1297 
1298 static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
1299  const char *filter_name, const char *args)
1300 {
1301  AVFilterGraph *graph = (*last_filter)->graph;
1303  int ret;
1304 
1306  avfilter_get_by_name(filter_name),
1307  filter_name, args, NULL, graph);
1308  if (ret < 0)
1309  return ret;
1310 
1311  ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
1312  if (ret < 0)
1313  return ret;
1314 
1315  *last_filter = ctx;
1316  *pad_idx = 0;
1317  return 0;
1318 }
1319 
1321  OutputFilter *ofilter, AVFilterInOut *out)
1322 {
1323  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1324  OutputStream *ost = ofilter->ost;
1325  OutputFile *of = ost->file;
1326  AVFilterContext *last_filter = out->filter_ctx;
1327  AVBPrint bprint;
1328  int pad_idx = out->pad_idx;
1329  int ret;
1330  const char *pix_fmts;
1331  char name[255];
1332 
1333  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1335  avfilter_get_by_name("buffersink"),
1336  name, NULL, NULL, graph);
1337 
1338  if (ret < 0)
1339  return ret;
1340 
1341  if ((ofp->width || ofp->height) && ofilter->ost->autoscale) {
1342  char args[255];
1344  const AVDictionaryEntry *e = NULL;
1345 
1346  snprintf(args, sizeof(args), "%d:%d",
1347  ofp->width, ofp->height);
1348 
1349  while ((e = av_dict_iterate(ost->sws_dict, e))) {
1350  av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
1351  }
1352 
1353  snprintf(name, sizeof(name), "scaler_out_%d_%d",
1354  ost->file->index, ost->index);
1356  name, args, NULL, graph)) < 0)
1357  return ret;
1358  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1359  return ret;
1360 
1361  last_filter = filter;
1362  pad_idx = 0;
1363  }
1364 
1366  ret = choose_pix_fmts(ofilter, &bprint, &pix_fmts);
1367  if (ret < 0)
1368  return ret;
1369 
1370  if (pix_fmts) {
1372 
1374  avfilter_get_by_name("format"),
1375  "format", pix_fmts, NULL, graph);
1376  av_bprint_finalize(&bprint, NULL);
1377  if (ret < 0)
1378  return ret;
1379  if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
1380  return ret;
1381 
1382  last_filter = filter;
1383  pad_idx = 0;
1384  }
1385 
1386  snprintf(name, sizeof(name), "trim_out_%d_%d",
1387  ost->file->index, ost->index);
1389  &last_filter, &pad_idx, name);
1390  if (ret < 0)
1391  return ret;
1392 
1393 
1394  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1395  return ret;
1396 
1397  return 0;
1398 }
1399 
1401  OutputFilter *ofilter, AVFilterInOut *out)
1402 {
1403  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1404  OutputStream *ost = ofilter->ost;
1405  OutputFile *of = ost->file;
1406  AVFilterContext *last_filter = out->filter_ctx;
1407  int pad_idx = out->pad_idx;
1408  AVBPrint args;
1409  char name[255];
1410  int ret;
1411 
1412  snprintf(name, sizeof(name), "out_%d_%d", ost->file->index, ost->index);
1414  avfilter_get_by_name("abuffersink"),
1415  name, NULL, NULL, graph);
1416  if (ret < 0)
1417  return ret;
1418  if ((ret = av_opt_set_int(ofp->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1419  return ret;
1420 
1421 #define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do { \
1422  AVFilterContext *filt_ctx; \
1423  \
1424  av_log(fg, AV_LOG_INFO, opt_name " is forwarded to lavfi " \
1425  "similarly to -af " filter_name "=%s.\n", arg); \
1426  \
1427  ret = avfilter_graph_create_filter(&filt_ctx, \
1428  avfilter_get_by_name(filter_name), \
1429  filter_name, arg, NULL, graph); \
1430  if (ret < 0) \
1431  goto fail; \
1432  \
1433  ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0); \
1434  if (ret < 0) \
1435  goto fail; \
1436  \
1437  last_filter = filt_ctx; \
1438  pad_idx = 0; \
1439 } while (0)
1441 
1442  choose_sample_fmts(ofp, &args);
1443  choose_sample_rates(ofp, &args);
1444  choose_channel_layouts(ofp, &args);
1445  if (!av_bprint_is_complete(&args)) {
1446  ret = AVERROR(ENOMEM);
1447  goto fail;
1448  }
1449  if (args.len) {
1451 
1452  snprintf(name, sizeof(name), "format_out_%d_%d",
1453  ost->file->index, ost->index);
1455  avfilter_get_by_name("aformat"),
1456  name, args.str, NULL, graph);
1457  if (ret < 0)
1458  goto fail;
1459 
1460  ret = avfilter_link(last_filter, pad_idx, format, 0);
1461  if (ret < 0)
1462  goto fail;
1463 
1464  last_filter = format;
1465  pad_idx = 0;
1466  }
1467 
1468  if (ost->apad && of->shortest) {
1469  int i;
1470 
1471  for (i = 0; i < of->nb_streams; i++)
1473  break;
1474 
1475  if (i < of->nb_streams) {
1476  AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
1477  }
1478  }
1479 
1480  snprintf(name, sizeof(name), "trim for output stream %d:%d",
1481  ost->file->index, ost->index);
1483  &last_filter, &pad_idx, name);
1484  if (ret < 0)
1485  goto fail;
1486 
1487  if ((ret = avfilter_link(last_filter, pad_idx, ofp->filter, 0)) < 0)
1488  goto fail;
1489 fail:
1490  av_bprint_finalize(&args, NULL);
1491 
1492  return ret;
1493 }
1494 
1496  OutputFilter *ofilter, AVFilterInOut *out)
1497 {
1498  if (!ofilter->ost) {
1499  av_log(fg, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
1500  return AVERROR(EINVAL);
1501  }
1502 
1503  switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
1504  case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, graph, ofilter, out);
1505  case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, graph, ofilter, out);
1506  default: av_assert0(0); return 0;
1507  }
1508 }
1509 
1511 {
1512  ifp->sub2video.last_pts = INT64_MIN;
1513  ifp->sub2video.end_pts = INT64_MIN;
1514 
1515  /* sub2video structure has been (re-)initialized.
1516  Mark it as such so that the system will be
1517  initialized with the first received heartbeat. */
1518  ifp->sub2video.initialize = 1;
1519 }
1520 
1522  InputFilter *ifilter, AVFilterInOut *in)
1523 {
1524  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1525 
1526  AVFilterContext *last_filter;
1527  const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
1528  const AVPixFmtDescriptor *desc;
1529  AVRational fr = ifp->opts.framerate;
1530  AVRational sar;
1531  AVBPrint args;
1532  char name[255];
1533  int ret, pad_idx = 0;
1535  if (!par)
1536  return AVERROR(ENOMEM);
1537 
1538  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE)
1539  sub2video_prepare(ifp);
1540 
1541  sar = ifp->sample_aspect_ratio;
1542  if(!sar.den)
1543  sar = (AVRational){0,1};
1545  av_bprintf(&args,
1546  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
1547  "pixel_aspect=%d/%d:colorspace=%d:range=%d",
1548  ifp->width, ifp->height, ifp->format,
1549  ifp->time_base.num, ifp->time_base.den, sar.num, sar.den,
1550  ifp->color_space, ifp->color_range);
1551  if (fr.num && fr.den)
1552  av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
1553  snprintf(name, sizeof(name), "graph %d input from stream %s", fg->index,
1554  ifp->opts.name);
1555 
1556 
1557  if ((ret = avfilter_graph_create_filter(&ifp->filter, buffer_filt, name,
1558  args.str, NULL, graph)) < 0)
1559  goto fail;
1560  par->hw_frames_ctx = ifp->hw_frames_ctx;
1561  ret = av_buffersrc_parameters_set(ifp->filter, par);
1562  if (ret < 0)
1563  goto fail;
1564  av_freep(&par);
1565  last_filter = ifp->filter;
1566 
1568  av_assert0(desc);
1569 
1570  // TODO: insert hwaccel enabled filters like transpose_vaapi into the graph
1571  if ((ifp->opts.flags & IFILTER_FLAG_AUTOROTATE) &&
1572  !(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
1573  int32_t *displaymatrix = ifp->displaymatrix;
1574  double theta;
1575 
1576  theta = get_rotation(displaymatrix);
1577 
1578  if (fabs(theta - 90) < 1.0) {
1579  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1580  displaymatrix[3] > 0 ? "cclock_flip" : "clock");
1581  } else if (fabs(theta - 180) < 1.0) {
1582  if (displaymatrix[0] < 0) {
1583  ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
1584  if (ret < 0)
1585  return ret;
1586  }
1587  if (displaymatrix[4] < 0) {
1588  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1589  }
1590  } else if (fabs(theta - 270) < 1.0) {
1591  ret = insert_filter(&last_filter, &pad_idx, "transpose",
1592  displaymatrix[3] < 0 ? "clock_flip" : "cclock");
1593  } else if (fabs(theta) > 1.0) {
1594  char rotate_buf[64];
1595  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1596  ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
1597  } else if (fabs(theta) < 1.0) {
1598  if (displaymatrix && displaymatrix[4] < 0) {
1599  ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
1600  }
1601  }
1602  if (ret < 0)
1603  return ret;
1604  }
1605 
1606  snprintf(name, sizeof(name), "trim_in_%s", ifp->opts.name);
1608  &last_filter, &pad_idx, name);
1609  if (ret < 0)
1610  return ret;
1611 
1612  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1613  return ret;
1614  return 0;
1615 fail:
1616  av_freep(&par);
1617 
1618  return ret;
1619 }
1620 
1622  InputFilter *ifilter, AVFilterInOut *in)
1623 {
1624  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1625  AVFilterContext *last_filter;
1626  const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
1627  AVBPrint args;
1628  char name[255];
1629  int ret, pad_idx = 0;
1630 
1632  av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
1633  ifp->time_base.num, ifp->time_base.den,
1634  ifp->sample_rate,
1636  if (av_channel_layout_check(&ifp->ch_layout) &&
1638  av_bprintf(&args, ":channel_layout=");
1640  } else
1641  av_bprintf(&args, ":channels=%d", ifp->ch_layout.nb_channels);
1642  snprintf(name, sizeof(name), "graph_%d_in_%s", fg->index, ifp->opts.name);
1643 
1644  if ((ret = avfilter_graph_create_filter(&ifp->filter, abuffer_filt,
1645  name, args.str, NULL,
1646  graph)) < 0)
1647  return ret;
1648  last_filter = ifp->filter;
1649 
1650  snprintf(name, sizeof(name), "trim for input stream %s", ifp->opts.name);
1652  &last_filter, &pad_idx, name);
1653  if (ret < 0)
1654  return ret;
1655 
1656  if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
1657  return ret;
1658 
1659  return 0;
1660 }
1661 
1663  InputFilter *ifilter, AVFilterInOut *in)
1664 {
1665  switch (ifp_from_ifilter(ifilter)->type) {
1666  case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, graph, ifilter, in);
1667  case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, graph, ifilter, in);
1668  default: av_assert0(0); return 0;
1669  }
1670 }
1671 
1673 {
1674  for (int i = 0; i < fg->nb_outputs; i++)
1676  for (int i = 0; i < fg->nb_inputs; i++)
1677  ifp_from_ifilter(fg->inputs[i])->filter = NULL;
1678  avfilter_graph_free(&fgt->graph);
1679 }
1680 
1682 {
1683  return f->nb_inputs == 0 &&
1684  (!strcmp(f->filter->name, "buffer") ||
1685  !strcmp(f->filter->name, "abuffer"));
1686 }
1687 
1688 static int graph_is_meta(AVFilterGraph *graph)
1689 {
1690  for (unsigned i = 0; i < graph->nb_filters; i++) {
1691  const AVFilterContext *f = graph->filters[i];
1692 
1693  /* in addition to filters flagged as meta, also
1694  * disregard sinks and buffersources (but not other sources,
1695  * since they introduce data we are not aware of)
1696  */
1697  if (!((f->filter->flags & AVFILTER_FLAG_METADATA_ONLY) ||
1698  f->nb_outputs == 0 ||
1700  return 0;
1701  }
1702  return 1;
1703 }
1704 
1705 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer);
1706 
1708 {
1709  FilterGraphPriv *fgp = fgp_from_fg(fg);
1710  AVBufferRef *hw_device;
1711  AVFilterInOut *inputs, *outputs, *cur;
1712  int ret, i, simple = filtergraph_is_simple(fg);
1713  int have_input_eof = 0;
1714  const char *graph_desc = fgp->graph_desc;
1715 
1716  cleanup_filtergraph(fg, fgt);
1717  fgt->graph = avfilter_graph_alloc();
1718  if (!fgt->graph)
1719  return AVERROR(ENOMEM);
1720 
1721  if (simple) {
1722  OutputStream *ost = fg->outputs[0]->ost;
1723 
1724  if (filter_nbthreads) {
1725  ret = av_opt_set(fgt->graph, "threads", filter_nbthreads, 0);
1726  if (ret < 0)
1727  goto fail;
1728  } else {
1729  const AVDictionaryEntry *e = NULL;
1730  e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
1731  if (e)
1732  av_opt_set(fgt->graph, "threads", e->value, 0);
1733  }
1734 
1735  if (av_dict_count(ost->sws_dict)) {
1736  ret = av_dict_get_string(ost->sws_dict,
1737  &fgt->graph->scale_sws_opts,
1738  '=', ':');
1739  if (ret < 0)
1740  goto fail;
1741  }
1742 
1743  if (av_dict_count(ost->swr_opts)) {
1744  char *args;
1745  ret = av_dict_get_string(ost->swr_opts, &args, '=', ':');
1746  if (ret < 0)
1747  goto fail;
1748  av_opt_set(fgt->graph, "aresample_swr_opts", args, 0);
1749  av_free(args);
1750  }
1751  } else {
1753  }
1754 
1755  hw_device = hw_device_for_filter();
1756 
1757  if ((ret = graph_parse(fgt->graph, graph_desc, &inputs, &outputs, hw_device)) < 0)
1758  goto fail;
1759 
1760  for (cur = inputs, i = 0; cur; cur = cur->next, i++)
1761  if ((ret = configure_input_filter(fg, fgt->graph, fg->inputs[i], cur)) < 0) {
1764  goto fail;
1765  }
1767 
1768  for (cur = outputs, i = 0; cur; cur = cur->next, i++) {
1769  ret = configure_output_filter(fg, fgt->graph, fg->outputs[i], cur);
1770  if (ret < 0) {
1772  goto fail;
1773  }
1774  }
1776 
1777  if (fgp->disable_conversions)
1779  if ((ret = avfilter_graph_config(fgt->graph, NULL)) < 0)
1780  goto fail;
1781 
1782  fgp->is_meta = graph_is_meta(fgt->graph);
1783 
1784  /* limit the lists of allowed formats to the ones selected, to
1785  * make sure they stay the same if the filtergraph is reconfigured later */
1786  for (int i = 0; i < fg->nb_outputs; i++) {
1787  OutputFilter *ofilter = fg->outputs[i];
1788  OutputFilterPriv *ofp = ofp_from_ofilter(ofilter);
1789  AVFilterContext *sink = ofp->filter;
1790 
1791  ofp->format = av_buffersink_get_format(sink);
1792 
1793  ofp->width = av_buffersink_get_w(sink);
1794  ofp->height = av_buffersink_get_h(sink);
1795 
1796  // If the timing parameters are not locked yet, get the tentative values
1797  // here but don't lock them. They will only be used if no output frames
1798  // are ever produced.
1799  if (!ofp->tb_out_locked) {
1801  if (ofp->fps.framerate.num <= 0 && ofp->fps.framerate.den <= 0 &&
1802  fr.num > 0 && fr.den > 0)
1803  ofp->fps.framerate = fr;
1804  ofp->tb_out = av_buffersink_get_time_base(sink);
1805  }
1807 
1810  ret = av_buffersink_get_ch_layout(sink, &ofp->ch_layout);
1811  if (ret < 0)
1812  goto fail;
1813  }
1814 
1815  for (int i = 0; i < fg->nb_inputs; i++) {
1817  AVFrame *tmp;
1818  while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
1819  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
1820  sub2video_frame(&ifp->ifilter, tmp, !fgt->graph);
1821  } else {
1823  }
1824  av_frame_free(&tmp);
1825  if (ret < 0)
1826  goto fail;
1827  }
1828  }
1829 
1830  /* send the EOFs for the finished inputs */
1831  for (int i = 0; i < fg->nb_inputs; i++) {
1833  if (fgt->eof_in[i]) {
1835  if (ret < 0)
1836  goto fail;
1837  have_input_eof = 1;
1838  }
1839  }
1840 
1841  if (have_input_eof) {
1842  // make sure the EOF propagates to the end of the graph
1844  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1845  goto fail;
1846  }
1847 
1848  return 0;
1849 fail:
1850  cleanup_filtergraph(fg, fgt);
1851  return ret;
1852 }
1853 
1855 {
1856  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1857  AVFrameSideData *sd;
1858  int ret;
1859 
1861  if (ret < 0)
1862  return ret;
1863 
1864  ifp->time_base = (ifp->type == AVMEDIA_TYPE_AUDIO) ? (AVRational){ 1, frame->sample_rate } :
1865  (ifp->opts.flags & IFILTER_FLAG_CFR) ? av_inv_q(ifp->opts.framerate) :
1866  frame->time_base;
1867 
1868  ifp->format = frame->format;
1869 
1870  ifp->width = frame->width;
1871  ifp->height = frame->height;
1873  ifp->color_space = frame->colorspace;
1874  ifp->color_range = frame->color_range;
1875 
1876  ifp->sample_rate = frame->sample_rate;
1878  if (ret < 0)
1879  return ret;
1880 
1882  if (sd)
1883  memcpy(ifp->displaymatrix, sd->data, sizeof(ifp->displaymatrix));
1884  ifp->displaymatrix_present = !!sd;
1885 
1886  return 0;
1887 }
1888 
1890 {
1891  const FilterGraphPriv *fgp = cfgp_from_cfg(fg);
1892  return fgp->is_simple;
1893 }
1894 
1895 static void send_command(FilterGraph *fg, AVFilterGraph *graph,
1896  double time, const char *target,
1897  const char *command, const char *arg, int all_filters)
1898 {
1899  int ret;
1900 
1901  if (!graph)
1902  return;
1903 
1904  if (time < 0) {
1905  char response[4096];
1906  ret = avfilter_graph_send_command(graph, target, command, arg,
1907  response, sizeof(response),
1908  all_filters ? 0 : AVFILTER_CMD_FLAG_ONE);
1909  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s",
1910  fg->index, ret, response);
1911  } else if (!all_filters) {
1912  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
1913  } else {
1914  ret = avfilter_graph_queue_command(graph, target, command, arg, 0, time);
1915  if (ret < 0)
1916  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
1917  }
1918 }
1919 
1920 static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
1921 {
1922  int nb_requests, nb_requests_max = -1;
1923  int best_input = -1;
1924 
1925  for (int i = 0; i < fg->nb_inputs; i++) {
1926  InputFilter *ifilter = fg->inputs[i];
1927  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
1928 
1929  if (fgt->eof_in[i])
1930  continue;
1931 
1932  nb_requests = av_buffersrc_get_nb_failed_requests(ifp->filter);
1933  if (nb_requests > nb_requests_max) {
1934  nb_requests_max = nb_requests;
1935  best_input = i;
1936  }
1937  }
1938 
1939  av_assert0(best_input >= 0);
1940 
1941  return best_input;
1942 }
1943 
1945 {
1946  OutputFilter *ofilter = &ofp->ofilter;
1947  FPSConvContext *fps = &ofp->fps;
1948  AVRational tb = (AVRational){ 0, 0 };
1949  AVRational fr;
1950  const FrameData *fd;
1951 
1952  fd = frame_data_c(frame);
1953 
1954  // apply -enc_time_base
1955  if (ofp->enc_timebase.num == ENC_TIME_BASE_DEMUX &&
1956  (fd->dec.tb.num <= 0 || fd->dec.tb.den <= 0)) {
1957  av_log(ofilter->ost, AV_LOG_ERROR,
1958  "Demuxing timebase not available - cannot use it for encoding\n");
1959  return AVERROR(EINVAL);
1960  }
1961 
1962  switch (ofp->enc_timebase.num) {
1963  case 0: break;
1964  case ENC_TIME_BASE_DEMUX: tb = fd->dec.tb; break;
1965  case ENC_TIME_BASE_FILTER: tb = frame->time_base; break;
1966  default: tb = ofp->enc_timebase; break;
1967  }
1968 
1969  if (ofilter->type == AVMEDIA_TYPE_AUDIO) {
1970  tb = tb.num ? tb : (AVRational){ 1, frame->sample_rate };
1971  goto finish;
1972  }
1973 
1974  fr = fps->framerate;
1975  if (!fr.num) {
1977  if (fr_sink.num > 0 && fr_sink.den > 0)
1978  fr = fr_sink;
1979  }
1980 
1981  if (ofilter->ost->is_cfr) {
1982  if (!fr.num && !fps->framerate_max.num) {
1983  fr = (AVRational){25, 1};
1984  av_log(ofilter->ost, AV_LOG_WARNING,
1985  "No information "
1986  "about the input framerate is available. Falling "
1987  "back to a default value of 25fps. Use the -r option "
1988  "if you want a different framerate.\n");
1989  }
1990 
1991  if (fps->framerate_max.num &&
1992  (av_q2d(fr) > av_q2d(fps->framerate_max) ||
1993  !fr.den))
1994  fr = fps->framerate_max;
1995  }
1996 
1997  if (fr.num > 0) {
1998  if (fps->framerate_supported) {
1999  int idx = av_find_nearest_q_idx(fr, fps->framerate_supported);
2000  fr = fps->framerate_supported[idx];
2001  }
2002  if (fps->framerate_clip) {
2003  av_reduce(&fr.num, &fr.den,
2004  fr.num, fr.den, fps->framerate_clip);
2005  }
2006  }
2007 
2008  if (!(tb.num > 0 && tb.den > 0))
2009  tb = av_inv_q(fr);
2010  if (!(tb.num > 0 && tb.den > 0))
2011  tb = frame->time_base;
2012 
2013  fps->framerate = fr;
2014 finish:
2015  ofp->tb_out = tb;
2016  ofp->tb_out_locked = 1;
2017 
2018  return 0;
2019 }
2020 
2022  int64_t start_time)
2023 {
2024  double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
2025 
2026  AVRational tb = tb_dst;
2027  AVRational filter_tb = frame->time_base;
2028  const int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
2029 
2030  if (frame->pts == AV_NOPTS_VALUE)
2031  goto early_exit;
2032 
2033  tb.den <<= extra_bits;
2034  float_pts = av_rescale_q(frame->pts, filter_tb, tb) -
2036  float_pts /= 1 << extra_bits;
2037  // when float_pts is not exactly an integer,
2038  // avoid exact midpoints to reduce the chance of rounding differences, this
2039  // can be removed in case the fps code is changed to work with integers
2040  if (float_pts != llrint(float_pts))
2041  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
2042 
2043  frame->pts = av_rescale_q(frame->pts, filter_tb, tb_dst) -
2045  frame->time_base = tb_dst;
2046 
2047 early_exit:
2048 
2049  if (debug_ts) {
2050  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
2051  frame ? av_ts2str(frame->pts) : "NULL",
2052  av_ts2timestr(frame->pts, &tb_dst),
2053  float_pts, tb_dst.num, tb_dst.den);
2054  }
2055 
2056  return float_pts;
2057 }
2058 
2059 /* Convert frame timestamps to the encoder timebase and decide how many times
2060  * should this (and possibly previous) frame be repeated in order to conform to
2061  * desired target framerate (if any).
2062  */
2064  int64_t *nb_frames, int64_t *nb_frames_prev)
2065 {
2066  OutputFilter *ofilter = &ofp->ofilter;
2067  OutputStream *ost = ofilter->ost;
2068  FPSConvContext *fps = &ofp->fps;
2069  double delta0, delta, sync_ipts, duration;
2070 
2071  if (!frame) {
2072  *nb_frames_prev = *nb_frames = mid_pred(fps->frames_prev_hist[0],
2073  fps->frames_prev_hist[1],
2074  fps->frames_prev_hist[2]);
2075 
2076  if (!*nb_frames && fps->last_dropped) {
2077  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2078  fps->last_dropped++;
2079  }
2080 
2081  goto finish;
2082  }
2083 
2085 
2086  sync_ipts = adjust_frame_pts_to_encoder_tb(frame, ofp->tb_out, ofp->ts_offset);
2087  /* delta0 is the "drift" between the input frame and
2088  * where it would fall in the output. */
2089  delta0 = sync_ipts - ofp->next_pts;
2090  delta = delta0 + duration;
2091 
2092  // tracks the number of times the PREVIOUS frame should be duplicated,
2093  // mostly for variable framerate (VFR)
2094  *nb_frames_prev = 0;
2095  /* by default, we output a single frame */
2096  *nb_frames = 1;
2097 
2098  if (delta0 < 0 &&
2099  delta > 0 &&
2100  ost->vsync_method != VSYNC_PASSTHROUGH
2102  && ost->vsync_method != VSYNC_DROP
2103 #endif
2104  ) {
2105  if (delta0 < -0.6) {
2106  av_log(ost, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
2107  } else
2108  av_log(ost, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
2109  sync_ipts = ofp->next_pts;
2110  duration += delta0;
2111  delta0 = 0;
2112  }
2113 
2114  switch (ost->vsync_method) {
2115  case VSYNC_VSCFR:
2116  if (fps->frame_number == 0 && delta0 >= 0.5) {
2117  av_log(ost, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
2118  delta = duration;
2119  delta0 = 0;
2120  ofp->next_pts = llrint(sync_ipts);
2121  }
2122  case VSYNC_CFR:
2123  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
2124  if (frame_drop_threshold && delta < frame_drop_threshold && fps->frame_number) {
2125  *nb_frames = 0;
2126  } else if (delta < -1.1)
2127  *nb_frames = 0;
2128  else if (delta > 1.1) {
2129  *nb_frames = llrintf(delta);
2130  if (delta0 > 1.1)
2131  *nb_frames_prev = llrintf(delta0 - 0.6);
2132  }
2133  frame->duration = 1;
2134  break;
2135  case VSYNC_VFR:
2136  if (delta <= -0.6)
2137  *nb_frames = 0;
2138  else if (delta > 0.6)
2139  ofp->next_pts = llrint(sync_ipts);
2141  break;
2142 #if FFMPEG_OPT_VSYNC_DROP
2143  case VSYNC_DROP:
2144 #endif
2145  case VSYNC_PASSTHROUGH:
2146  ofp->next_pts = llrint(sync_ipts);
2148  break;
2149  default:
2150  av_assert0(0);
2151  }
2152 
2153 finish:
2154  memmove(fps->frames_prev_hist + 1,
2155  fps->frames_prev_hist,
2156  sizeof(fps->frames_prev_hist[0]) * (FF_ARRAY_ELEMS(fps->frames_prev_hist) - 1));
2157  fps->frames_prev_hist[0] = *nb_frames_prev;
2158 
2159  if (*nb_frames_prev == 0 && fps->last_dropped) {
2160  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2162  "*** dropping frame %"PRId64" at ts %"PRId64"\n",
2163  fps->frame_number, fps->last_frame->pts);
2164  }
2165  if (*nb_frames > (*nb_frames_prev && fps->last_dropped) + (*nb_frames > *nb_frames_prev)) {
2166  uint64_t nb_frames_dup;
2167  if (*nb_frames > dts_error_threshold * 30) {
2168  av_log(ost, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", *nb_frames - 1);
2169  atomic_fetch_add(&ofilter->nb_frames_drop, 1);
2170  *nb_frames = 0;
2171  return;
2172  }
2173  nb_frames_dup = atomic_fetch_add(&ofilter->nb_frames_dup,
2174  *nb_frames - (*nb_frames_prev && fps->last_dropped) - (*nb_frames > *nb_frames_prev));
2175  av_log(ost, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", *nb_frames - 1);
2176  if (nb_frames_dup > fps->dup_warning) {
2177  av_log(ost, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", fps->dup_warning);
2178  fps->dup_warning *= 10;
2179  }
2180  }
2181 
2182  fps->last_dropped = *nb_frames == *nb_frames_prev && frame;
2184 }
2185 
2187 {
2189  int ret;
2190 
2191  // we are finished and no frames were ever seen at this output,
2192  // at least initialize the encoder with a dummy frame
2193  if (!fgt->got_frame) {
2194  AVFrame *frame = fgt->frame;
2195  FrameData *fd;
2196 
2197  frame->time_base = ofp->tb_out;
2198  frame->format = ofp->format;
2199 
2200  frame->width = ofp->width;
2201  frame->height = ofp->height;
2203 
2204  frame->sample_rate = ofp->sample_rate;
2205  if (ofp->ch_layout.nb_channels) {
2207  if (ret < 0)
2208  return ret;
2209  }
2210 
2211  fd = frame_data(frame);
2212  if (!fd)
2213  return AVERROR(ENOMEM);
2214 
2215  fd->frame_rate_filter = ofp->fps.framerate;
2216 
2217  av_assert0(!frame->buf[0]);
2218 
2220  "No filtered frames for output stream, trying to "
2221  "initialize anyway.\n");
2222 
2223  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame);
2224  if (ret < 0) {
2226  return ret;
2227  }
2228  }
2229 
2230  fgt->eof_out[ofp->index] = 1;
2231 
2232  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, NULL);
2233  return (ret == AVERROR_EOF) ? 0 : ret;
2234 }
2235 
2237  AVFrame *frame)
2238 {
2240  AVFrame *frame_prev = ofp->fps.last_frame;
2241  enum AVMediaType type = ofp->ofilter.type;
2242 
2243  int64_t nb_frames = !!frame, nb_frames_prev = 0;
2244 
2245  if (type == AVMEDIA_TYPE_VIDEO && (frame || fgt->got_frame))
2246  video_sync_process(ofp, frame, &nb_frames, &nb_frames_prev);
2247 
2248  for (int64_t i = 0; i < nb_frames; i++) {
2249  AVFrame *frame_out;
2250  int ret;
2251 
2252  if (type == AVMEDIA_TYPE_VIDEO) {
2253  AVFrame *frame_in = (i < nb_frames_prev && frame_prev->buf[0]) ?
2254  frame_prev : frame;
2255  if (!frame_in)
2256  break;
2257 
2258  frame_out = fgp->frame_enc;
2259  ret = av_frame_ref(frame_out, frame_in);
2260  if (ret < 0)
2261  return ret;
2262 
2263  frame_out->pts = ofp->next_pts;
2264 
2265  if (ofp->fps.dropped_keyframe) {
2266  frame_out->flags |= AV_FRAME_FLAG_KEY;
2267  ofp->fps.dropped_keyframe = 0;
2268  }
2269  } else {
2270  frame->pts = (frame->pts == AV_NOPTS_VALUE) ? ofp->next_pts :
2273 
2274  frame->time_base = ofp->tb_out;
2276  (AVRational){ 1, frame->sample_rate },
2277  ofp->tb_out);
2278 
2279  ofp->next_pts = frame->pts + frame->duration;
2280 
2281  frame_out = frame;
2282  }
2283 
2284  // send the frame to consumers
2285  ret = sch_filter_send(fgp->sch, fgp->sch_idx, ofp->index, frame_out);
2286  if (ret < 0) {
2287  av_frame_unref(frame_out);
2288 
2289  if (!fgt->eof_out[ofp->index]) {
2290  fgt->eof_out[ofp->index] = 1;
2291  fgp->nb_outputs_done++;
2292  }
2293 
2294  return ret == AVERROR_EOF ? 0 : ret;
2295  }
2296 
2297  if (type == AVMEDIA_TYPE_VIDEO) {
2298  ofp->fps.frame_number++;
2299  ofp->next_pts++;
2300 
2301  if (i == nb_frames_prev && frame)
2303  }
2304 
2305  fgt->got_frame = 1;
2306  }
2307 
2308  if (frame && frame_prev) {
2309  av_frame_unref(frame_prev);
2310  av_frame_move_ref(frame_prev, frame);
2311  }
2312 
2313  if (!frame)
2314  return close_output(ofp, fgt);
2315 
2316  return 0;
2317 }
2318 
2320  AVFrame *frame)
2321 {
2323  OutputStream *ost = ofp->ofilter.ost;
2324  AVFilterContext *filter = ofp->filter;
2325  FrameData *fd;
2326  int ret;
2327 
2330  if (ret == AVERROR_EOF && !fgt->eof_out[ofp->index]) {
2331  ret = fg_output_frame(ofp, fgt, NULL);
2332  return (ret < 0) ? ret : 1;
2333  } else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
2334  return 1;
2335  } else if (ret < 0) {
2336  av_log(fgp, AV_LOG_WARNING,
2337  "Error in retrieving a frame from the filtergraph: %s\n",
2338  av_err2str(ret));
2339  return ret;
2340  }
2341 
2342  if (fgt->eof_out[ofp->index]) {
2344  return 0;
2345  }
2346 
2348 
2349  if (debug_ts)
2350  av_log(fgp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
2353 
2354  // Choose the output timebase the first time we get a frame.
2355  if (!ofp->tb_out_locked) {
2356  ret = choose_out_timebase(ofp, frame);
2357  if (ret < 0) {
2358  av_log(ost, AV_LOG_ERROR, "Could not choose an output time base\n");
2360  return ret;
2361  }
2362  }
2363 
2364  fd = frame_data(frame);
2365  if (!fd) {
2367  return AVERROR(ENOMEM);
2368  }
2369 
2371 
2372  // only use bits_per_raw_sample passed through from the decoder
2373  // if the filtergraph did not touch the frame data
2374  if (!fgp->is_meta)
2375  fd->bits_per_raw_sample = 0;
2376 
2377  if (ost->type == AVMEDIA_TYPE_VIDEO) {
2378  if (!frame->duration) {
2380  if (fr.num > 0 && fr.den > 0)
2382  }
2383 
2384  fd->frame_rate_filter = ofp->fps.framerate;
2385  }
2386 
2387  ret = fg_output_frame(ofp, fgt, frame);
2389  if (ret < 0)
2390  return ret;
2391 
2392  return 0;
2393 }
2394 
2395 /* retrieve all frames available at filtergraph outputs
2396  * and send them to consumers */
2398  AVFrame *frame)
2399 {
2400  FilterGraphPriv *fgp = fgp_from_fg(fg);
2401  int did_step = 0;
2402 
2403  // graph not configured, just select the input to request
2404  if (!fgt->graph) {
2405  for (int i = 0; i < fg->nb_inputs; i++) {
2407  if (ifp->format < 0 && !fgt->eof_in[i]) {
2408  fgt->next_in = i;
2409  return 0;
2410  }
2411  }
2412 
2413  // This state - graph is not configured, but all inputs are either
2414  // initialized or EOF - should be unreachable because sending EOF to a
2415  // filter without even a fallback format should fail
2416  av_assert0(0);
2417  return AVERROR_BUG;
2418  }
2419 
2420  while (fgp->nb_outputs_done < fg->nb_outputs) {
2421  int ret;
2422 
2424  if (ret == AVERROR(EAGAIN)) {
2425  fgt->next_in = choose_input(fg, fgt);
2426  break;
2427  } else if (ret < 0) {
2428  if (ret == AVERROR_EOF)
2429  av_log(fg, AV_LOG_VERBOSE, "Filtergraph returned EOF, finishing\n");
2430  else
2431  av_log(fg, AV_LOG_ERROR,
2432  "Error requesting a frame from the filtergraph: %s\n",
2433  av_err2str(ret));
2434  return ret;
2435  }
2436  fgt->next_in = fg->nb_inputs;
2437 
2438  // return after one iteration, so that scheduler can rate-control us
2439  if (did_step && fgp->have_sources)
2440  return 0;
2441 
2442  /* Reap all buffers present in the buffer sinks */
2443  for (int i = 0; i < fg->nb_outputs; i++) {
2445 
2446  ret = 0;
2447  while (!ret) {
2448  ret = fg_output_step(ofp, fgt, frame);
2449  if (ret < 0)
2450  return ret;
2451  }
2452  }
2453  did_step = 1;
2454  }
2455 
2456  return (fgp->nb_outputs_done == fg->nb_outputs) ? AVERROR_EOF : 0;
2457 }
2458 
2459 static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
2460 {
2461  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2462  int64_t pts2;
2463 
2464  /* subtitles seem to be usually muxed ahead of other streams;
2465  if not, subtracting a larger time here is necessary */
2466  pts2 = av_rescale_q(pts, tb, ifp->time_base) - 1;
2467 
2468  /* do not send the heartbeat frame if the subtitle is already ahead */
2469  if (pts2 <= ifp->sub2video.last_pts)
2470  return;
2471 
2472  if (pts2 >= ifp->sub2video.end_pts || ifp->sub2video.initialize)
2473  /* if we have hit the end of the current displayed subpicture,
2474  or if we need to initialize the system, update the
2475  overlayed subpicture and its start/end times */
2476  sub2video_update(ifp, pts2 + 1, NULL);
2477  else
2478  sub2video_push_ref(ifp, pts2);
2479 }
2480 
2481 static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
2482 {
2483  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2484  int ret;
2485 
2486  if (buffer) {
2487  AVFrame *tmp;
2488 
2489  if (!frame)
2490  return 0;
2491 
2492  tmp = av_frame_alloc();
2493  if (!tmp)
2494  return AVERROR(ENOMEM);
2495 
2497 
2498  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2499  if (ret < 0) {
2500  av_frame_free(&tmp);
2501  return ret;
2502  }
2503 
2504  return 0;
2505  }
2506 
2507  // heartbeat frame
2508  if (frame && !frame->buf[0]) {
2510  return 0;
2511  }
2512 
2513  if (!frame) {
2514  if (ifp->sub2video.end_pts < INT64_MAX)
2515  sub2video_update(ifp, INT64_MAX, NULL);
2516 
2517  return av_buffersrc_add_frame(ifp->filter, NULL);
2518  }
2519 
2520  ifp->width = frame->width ? frame->width : ifp->width;
2521  ifp->height = frame->height ? frame->height : ifp->height;
2522 
2523  sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
2524 
2525  return 0;
2526 }
2527 
2528 static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter,
2529  int64_t pts, AVRational tb)
2530 {
2531  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2532  int ret;
2533 
2534  if (fgt->eof_in[ifp->index])
2535  return 0;
2536 
2537  fgt->eof_in[ifp->index] = 1;
2538 
2539  if (ifp->filter) {
2540  pts = av_rescale_q_rnd(pts, tb, ifp->time_base,
2542 
2544  if (ret < 0)
2545  return ret;
2546  } else {
2547  if (ifp->format < 0) {
2548  // the filtergraph was never configured, use the fallback parameters
2549  ifp->format = ifp->opts.fallback->format;
2550  ifp->sample_rate = ifp->opts.fallback->sample_rate;
2551  ifp->width = ifp->opts.fallback->width;
2552  ifp->height = ifp->opts.fallback->height;
2554  ifp->color_space = ifp->opts.fallback->colorspace;
2555  ifp->color_range = ifp->opts.fallback->color_range;
2556  ifp->time_base = ifp->opts.fallback->time_base;
2557 
2559  &ifp->opts.fallback->ch_layout);
2560  if (ret < 0)
2561  return ret;
2562 
2563  if (ifilter_has_all_input_formats(ifilter->graph)) {
2564  ret = configure_filtergraph(ifilter->graph, fgt);
2565  if (ret < 0) {
2566  av_log(NULL, AV_LOG_ERROR, "Error initializing filters!\n");
2567  return ret;
2568  }
2569  }
2570  }
2571 
2572  if (ifp->format < 0) {
2574  "Cannot determine format of input %s after EOF\n",
2575  ifp->opts.name);
2576  return AVERROR_INVALIDDATA;
2577  }
2578  }
2579 
2580  return 0;
2581 }
2582 
/* Bitmask flags describing why a filtergraph needs reconfiguring; ORed into
 * need_reinit by send_frame().
 * NOTE(review): the `enum {` opener was lost in extraction and restored
 * (the doxygen index marks these as anonymous-enum members). */
enum {
    VIDEO_CHANGED   = (1 << 0),  // video format/size/colorspace/range changed
    AUDIO_CHANGED   = (1 << 1),  // audio format/rate/channel layout changed
    MATRIX_CHANGED  = (1 << 2),  // display matrix side data changed
    HWACCEL_CHANGED = (1 << 3)   // hw frames context changed
};
2589 
/* Substitute the literal "unknown" for a NULL string, for log messages. */
static const char *unknown_if_null(const char *str)
{
    if (!str)
        return "unknown";
    return str;
}
2594 
2596  InputFilter *ifilter, AVFrame *frame)
2597 {
2598  InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
2599  FrameData *fd;
2600  AVFrameSideData *sd;
2601  int need_reinit = 0, ret;
2602 
2603  /* determine if the parameters for this input changed */
2604  switch (ifp->type) {
2605  case AVMEDIA_TYPE_AUDIO:
2606  if (ifp->format != frame->format ||
2607  ifp->sample_rate != frame->sample_rate ||
2609  need_reinit |= AUDIO_CHANGED;
2610  break;
2611  case AVMEDIA_TYPE_VIDEO:
2612  if (ifp->format != frame->format ||
2613  ifp->width != frame->width ||
2614  ifp->height != frame->height ||
2615  ifp->color_space != frame->colorspace ||
2616  ifp->color_range != frame->color_range)
2617  need_reinit |= VIDEO_CHANGED;
2618  break;
2619  }
2620 
2622  if (!ifp->displaymatrix_present ||
2623  memcmp(sd->data, ifp->displaymatrix, sizeof(ifp->displaymatrix)))
2624  need_reinit |= MATRIX_CHANGED;
2625  } else if (ifp->displaymatrix_present)
2626  need_reinit |= MATRIX_CHANGED;
2627 
2628  if (!(ifp->opts.flags & IFILTER_FLAG_REINIT) && fgt->graph)
2629  need_reinit = 0;
2630 
2631  if (!!ifp->hw_frames_ctx != !!frame->hw_frames_ctx ||
2632  (ifp->hw_frames_ctx && ifp->hw_frames_ctx->data != frame->hw_frames_ctx->data))
2633  need_reinit |= HWACCEL_CHANGED;
2634 
2635  if (need_reinit) {
2637  if (ret < 0)
2638  return ret;
2639  }
2640 
2641  /* (re)init the graph if possible, otherwise buffer the frame and return */
2642  if (need_reinit || !fgt->graph) {
2643  AVFrame *tmp = av_frame_alloc();
2644 
2645  if (!tmp)
2646  return AVERROR(ENOMEM);
2647 
2648  if (!ifilter_has_all_input_formats(fg)) {
2650 
2651  ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
2652  if (ret < 0)
2653  av_frame_free(&tmp);
2654 
2655  return ret;
2656  }
2657 
2658  ret = fgt->graph ? read_frames(fg, fgt, tmp) : 0;
2659  av_frame_free(&tmp);
2660  if (ret < 0)
2661  return ret;
2662 
2663  if (fgt->graph) {
2664  AVBPrint reason;
2666  if (need_reinit & AUDIO_CHANGED) {
2667  const char *sample_format_name = av_get_sample_fmt_name(frame->format);
2668  av_bprintf(&reason, "audio parameters changed to %d Hz, ", frame->sample_rate);
2670  av_bprintf(&reason, ", %s, ", unknown_if_null(sample_format_name));
2671  }
2672  if (need_reinit & VIDEO_CHANGED) {
2673  const char *pixel_format_name = av_get_pix_fmt_name(frame->format);
2674  const char *color_space_name = av_color_space_name(frame->colorspace);
2675  const char *color_range_name = av_color_range_name(frame->color_range);
2676  av_bprintf(&reason, "video parameters changed to %s(%s, %s), %dx%d, ",
2677  unknown_if_null(pixel_format_name), unknown_if_null(color_range_name),
2678  unknown_if_null(color_space_name), frame->width, frame->height);
2679  }
2680  if (need_reinit & MATRIX_CHANGED)
2681  av_bprintf(&reason, "display matrix changed, ");
2682  if (need_reinit & HWACCEL_CHANGED)
2683  av_bprintf(&reason, "hwaccel changed, ");
2684  if (reason.len > 1)
2685  reason.str[reason.len - 2] = '\0'; // remove last comma
2686  av_log(fg, AV_LOG_INFO, "Reconfiguring filter graph%s%s\n", reason.len ? " because " : "", reason.str);
2687  }
2688 
2689  ret = configure_filtergraph(fg, fgt);
2690  if (ret < 0) {
2691  av_log(fg, AV_LOG_ERROR, "Error reinitializing filters!\n");
2692  return ret;
2693  }
2694  }
2695 
2698  frame->time_base = ifp->time_base;
2699 
2700  fd = frame_data(frame);
2701  if (!fd)
2702  return AVERROR(ENOMEM);
2704 
2707  if (ret < 0) {
2709  if (ret != AVERROR_EOF)
2710  av_log(fg, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
2711  return ret;
2712  }
2713 
2714  return 0;
2715 }
2716 
2717 static void fg_thread_set_name(const FilterGraph *fg)
2718 {
2719  char name[16];
2720  if (filtergraph_is_simple(fg)) {
2721  OutputStream *ost = fg->outputs[0]->ost;
2722  snprintf(name, sizeof(name), "%cf#%d:%d",
2723  av_get_media_type_string(ost->type)[0],
2724  ost->file->index, ost->index);
2725  } else {
2726  snprintf(name, sizeof(name), "fc%d", fg->index);
2727  }
2728 
2730 }
2731 
2733 {
2734  if (fgt->frame_queue_out) {
2735  AVFrame *frame;
2736  while (av_fifo_read(fgt->frame_queue_out, &frame, 1) >= 0)
2737  av_frame_free(&frame);
2739  }
2740 
2741  av_frame_free(&fgt->frame);
2742  av_freep(&fgt->eof_in);
2743  av_freep(&fgt->eof_out);
2744 
2745  avfilter_graph_free(&fgt->graph);
2746 
2747  memset(fgt, 0, sizeof(*fgt));
2748 }
2749 
2750 static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
2751 {
2752  memset(fgt, 0, sizeof(*fgt));
2753 
2754  fgt->frame = av_frame_alloc();
2755  if (!fgt->frame)
2756  goto fail;
2757 
2758  fgt->eof_in = av_calloc(fg->nb_inputs, sizeof(*fgt->eof_in));
2759  if (!fgt->eof_in)
2760  goto fail;
2761 
2762  fgt->eof_out = av_calloc(fg->nb_outputs, sizeof(*fgt->eof_out));
2763  if (!fgt->eof_out)
2764  goto fail;
2765 
2767  if (!fgt->frame_queue_out)
2768  goto fail;
2769 
2770  return 0;
2771 
2772 fail:
2773  fg_thread_uninit(fgt);
2774  return AVERROR(ENOMEM);
2775 }
2776 
2777 static int filter_thread(void *arg)
2778 {
2779  FilterGraphPriv *fgp = arg;
2780  FilterGraph *fg = &fgp->fg;
2781 
2782  FilterGraphThread fgt;
2783  int ret = 0, input_status = 0;
2784 
2785  ret = fg_thread_init(&fgt, fg);
2786  if (ret < 0)
2787  goto finish;
2788 
2789  fg_thread_set_name(fg);
2790 
2791  // if we have all input parameters the graph can now be configured
2793  ret = configure_filtergraph(fg, &fgt);
2794  if (ret < 0) {
2795  av_log(fg, AV_LOG_ERROR, "Error configuring filter graph: %s\n",
2796  av_err2str(ret));
2797  goto finish;
2798  }
2799  }
2800 
2801  while (1) {
2802  InputFilter *ifilter;
2803  InputFilterPriv *ifp;
2804  enum FrameOpaque o;
2805  unsigned input_idx = fgt.next_in;
2806 
2807  input_status = sch_filter_receive(fgp->sch, fgp->sch_idx,
2808  &input_idx, fgt.frame);
2809  if (input_status == AVERROR_EOF) {
2810  av_log(fg, AV_LOG_VERBOSE, "Filtering thread received EOF\n");
2811  break;
2812  } else if (input_status == AVERROR(EAGAIN)) {
2813  // should only happen when we didn't request any input
2814  av_assert0(input_idx == fg->nb_inputs);
2815  goto read_frames;
2816  }
2817  av_assert0(input_status >= 0);
2818 
2819  o = (intptr_t)fgt.frame->opaque;
2820 
2821  o = (intptr_t)fgt.frame->opaque;
2822 
2823  // message on the control stream
2824  if (input_idx == fg->nb_inputs) {
2825  FilterCommand *fc;
2826 
2827  av_assert0(o == FRAME_OPAQUE_SEND_COMMAND && fgt.frame->buf[0]);
2828 
2829  fc = (FilterCommand*)fgt.frame->buf[0]->data;
2830  send_command(fg, fgt.graph, fc->time, fc->target, fc->command, fc->arg,
2831  fc->all_filters);
2832  av_frame_unref(fgt.frame);
2833  continue;
2834  }
2835 
2836  // we received an input frame or EOF
2837  ifilter = fg->inputs[input_idx];
2838  ifp = ifp_from_ifilter(ifilter);
2839 
2840  if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
2841  int hb_frame = input_status >= 0 && o == FRAME_OPAQUE_SUB_HEARTBEAT;
2842  ret = sub2video_frame(ifilter, (fgt.frame->buf[0] || hb_frame) ? fgt.frame : NULL,
2843  !fgt.graph);
2844  } else if (fgt.frame->buf[0]) {
2845  ret = send_frame(fg, &fgt, ifilter, fgt.frame);
2846  } else {
2848  ret = send_eof(&fgt, ifilter, fgt.frame->pts, fgt.frame->time_base);
2849  }
2850  av_frame_unref(fgt.frame);
2851  if (ret == AVERROR_EOF) {
2852  av_log(fg, AV_LOG_VERBOSE, "Input %u no longer accepts new data\n",
2853  input_idx);
2854  sch_filter_receive_finish(fgp->sch, fgp->sch_idx, input_idx);
2855  continue;
2856  }
2857  if (ret < 0)
2858  goto finish;
2859 
2860 read_frames:
2861  // retrieve all newly avalable frames
2862  ret = read_frames(fg, &fgt, fgt.frame);
2863  if (ret == AVERROR_EOF) {
2864  av_log(fg, AV_LOG_VERBOSE, "All consumers returned EOF\n");
2865  break;
2866  } else if (ret < 0) {
2867  av_log(fg, AV_LOG_ERROR, "Error sending frames to consumers: %s\n",
2868  av_err2str(ret));
2869  goto finish;
2870  }
2871  }
2872 
2873  for (unsigned i = 0; i < fg->nb_outputs; i++) {
2875 
2876  if (fgt.eof_out[i] || !fgt.graph)
2877  continue;
2878 
2879  ret = fg_output_frame(ofp, &fgt, NULL);
2880  if (ret < 0)
2881  goto finish;
2882  }
2883 
2884 finish:
2885  // EOF is normal termination
2886  if (ret == AVERROR_EOF)
2887  ret = 0;
2888 
2889  fg_thread_uninit(&fgt);
2890 
2891  return ret;
2892 }
2893 
2894 void fg_send_command(FilterGraph *fg, double time, const char *target,
2895  const char *command, const char *arg, int all_filters)
2896 {
2897  FilterGraphPriv *fgp = fgp_from_fg(fg);
2898  AVBufferRef *buf;
2899  FilterCommand *fc;
2900 
2901  fc = av_mallocz(sizeof(*fc));
2902  if (!fc)
2903  return;
2904 
2905  buf = av_buffer_create((uint8_t*)fc, sizeof(*fc), filter_command_free, NULL, 0);
2906  if (!buf) {
2907  av_freep(&fc);
2908  return;
2909  }
2910 
2911  fc->target = av_strdup(target);
2912  fc->command = av_strdup(command);
2913  fc->arg = av_strdup(arg);
2914  if (!fc->target || !fc->command || !fc->arg) {
2915  av_buffer_unref(&buf);
2916  return;
2917  }
2918 
2919  fc->time = time;
2920  fc->all_filters = all_filters;
2921 
2922  fgp->frame->buf[0] = buf;
2923  fgp->frame->opaque = (void*)(intptr_t)FRAME_OPAQUE_SEND_COMMAND;
2924 
2925  sch_filter_command(fgp->sch, fgp->sch_idx, fgp->frame);
2926 }
AV_OPT_SEARCH_CHILDREN
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:522
SCH_FILTER_OUT
#define SCH_FILTER_OUT(filter, output)
Definition: ffmpeg_sched.h:122
AVSubtitle
Definition: avcodec.h:2214
formats
formats
Definition: signature.h:48
set_channel_layout
static int set_channel_layout(OutputFilterPriv *f, OutputStream *ost)
Definition: ffmpeg_filter.c:741
configure_input_filter
static int configure_input_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1662
FilterGraphThread::next_in
unsigned next_in
Definition: ffmpeg_filter.c:94
AVCodec
AVCodec.
Definition: codec.h:187
AVMEDIA_TYPE_SUBTITLE
@ AVMEDIA_TYPE_SUBTITLE
Definition: avutil.h:204
av_gettime_relative
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
AVFILTER_CMD_FLAG_ONE
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:693
AV_LOG_WARNING
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:186
AVFrame::color_range
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:619
AV_BPRINT_SIZE_UNLIMITED
#define AV_BPRINT_SIZE_UNLIMITED
av_buffersink_get_ch_layout
int av_buffersink_get_ch_layout(const AVFilterContext *ctx, AVChannelLayout *out)
Definition: buffersink.c:198
AVPixelFormat
AVPixelFormat
Pixel format.
Definition: pixfmt.h:71
av_buffersink_get_sample_aspect_ratio
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
name
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf default minimum maximum flags name is the option name
Definition: writing_filters.txt:88
avfilter_filter_pad_count
unsigned avfilter_filter_pad_count(const AVFilter *filter, int is_output)
Get the number of elements in an AVFilter's inputs or outputs array.
Definition: avfilter.c:615
extra_bits
#define extra_bits(eb)
Definition: intrax8.c:119
OutputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:273
av_clip
#define av_clip
Definition: common.h:98
InputFilterPriv::type
enum AVMediaType type
Definition: ffmpeg_filter.c:120
sch_filter_send
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame)
Called by filtergraph tasks to send a filtered frame or EOF to consumers.
Definition: ffmpeg_sched.c:2425
VSYNC_VFR
@ VSYNC_VFR
Definition: ffmpeg.h:69
OutputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:205
av_bprint_is_complete
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:218
r
const char * r
Definition: vf_curves.c:126
AVERROR
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later. That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another. Frame references ownership and permissions
nb_input_files
int nb_input_files
Definition: ffmpeg.c:126
AVSubtitle::rects
AVSubtitleRect ** rects
Definition: avcodec.h:2219
opt.h
choose_input
static int choose_input(const FilterGraph *fg, const FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1920
get_rotation
double get_rotation(const int32_t *displaymatrix)
Definition: cmdutils.c:1114
FilterGraphPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:64
FilterGraphPriv::sch
Scheduler * sch
Definition: ffmpeg_filter.c:68
AVCodecParameters::codec_type
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:51
FilterGraphThread::got_frame
int got_frame
Definition: ffmpeg_filter.c:96
AVFilterGraph::nb_threads
int nb_threads
Maximum number of threads used by filters in this graph.
Definition: avfilter.h:839
InputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:137
avfilter_pad_get_name
const char * avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
Get the name of an AVFilterPad.
Definition: avfilter.c:972
FrameData
Definition: ffmpeg.h:593
send_command
static void send_command(FilterGraph *fg, AVFilterGraph *graph, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:1895
InputFilterPriv::last_pts
int64_t last_pts
Definition: ffmpeg_filter.c:167
avfilter_graph_segment_create_filters
int avfilter_graph_segment_create_filters(AVFilterGraphSegment *seg, int flags)
Create filters specified in a graph segment.
Definition: graphparser.c:516
out
FILE * out
Definition: movenc.c:54
av_frame_get_buffer
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:216
av_bprint_init
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
av_frame_get_side_data
AVFrameSideData * av_frame_get_side_data(const AVFrame *frame, enum AVFrameSideDataType type)
Definition: frame.c:716
InputFilterPriv::filter
AVFilterContext * filter
Definition: ffmpeg_filter.c:110
init_simple_filtergraph
int init_simple_filtergraph(InputStream *ist, OutputStream *ost, char *graph_desc, Scheduler *sch, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:1096
sample_fmts
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:947
AVFrame::duration
int64_t duration
Duration of the frame, in the same units as pts.
Definition: frame.h:746
av_pix_fmt_desc_get
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2962
AVERROR_EOF
#define AVERROR_EOF
End of file.
Definition: error.h:57
AVBufferRef::data
uint8_t * data
The data buffer.
Definition: buffer.h:90
FilterGraph::inputs
InputFilter ** inputs
Definition: ffmpeg.h:290
av_buffersink_get_frame_flags
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:120
av_dict_count
int av_dict_count(const AVDictionary *m)
Get number of entries in dictionary.
Definition: dict.c:39
ifilter_parameters_from_frame
static int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
Definition: ffmpeg_filter.c:1854
HWACCEL_CHANGED
@ HWACCEL_CHANGED
Definition: ffmpeg_filter.c:2587
frame_drop_threshold
float frame_drop_threshold
Definition: ffmpeg_opt.c:68
AV_TIME_BASE_Q
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:264
InputFilterPriv::time_base
AVRational time_base
Definition: ffmpeg_filter.c:139
output
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce output
Definition: filter_design.txt:225
AV_BUFFERSRC_FLAG_PUSH
@ AV_BUFFERSRC_FLAG_PUSH
Immediately push the frame to the output.
Definition: buffersrc.h:46
FilterCommand::arg
char * arg
Definition: ffmpeg_filter.c:243
AVSubtitleRect
Definition: avcodec.h:2187
av_asprintf
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:115
AVSubtitle::num_rects
unsigned num_rects
Definition: avcodec.h:2218
fg_free
void fg_free(FilterGraph **pfg)
Definition: ffmpeg_filter.c:908
FPSConvContext::frames_prev_hist
int64_t frames_prev_hist[3]
Definition: ffmpeg_filter.c:187
AV_CODEC_ID_MPEG4
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:64
av_frame_free
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:88
OutputFile::start_time
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:586
AVFrame::opaque
void * opaque
Frame owner's private data.
Definition: frame.h:488
AVFrame::colorspace
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:630
InputFile::index
int index
Definition: ffmpeg.h:392
AVFrame
This structure describes decoded (raw) audio or video data.
Definition: frame.h:340
tmp
static uint8_t tmp[11]
Definition: aes_ctr.c:28
AVFilterInOut::next
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:967
pixdesc.h
AVFrame::pts
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:452
AVFrame::width
int width
Definition: frame.h:412
FilterGraphPriv::log_name
char log_name[32]
Definition: ffmpeg_filter.c:49
w
uint8_t w
Definition: llviddspenc.c:38
FilterGraphPriv::have_sources
int have_sources
Definition: ffmpeg_filter.c:56
AVOption
AVOption.
Definition: opt.h:346
fg_output_frame
static int fg_output_frame(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2236
av_buffersrc_add_frame
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:174
FilterGraph::index
int index
Definition: ffmpeg.h:288
InputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:136
data
const char data[16]
Definition: mxf.c:148
FPSConvContext::last_dropped
int last_dropped
Definition: ffmpeg_filter.c:191
OutputFilterPriv::ts_offset
int64_t ts_offset
Definition: ffmpeg_filter.c:230
cleanup_filtergraph
static void cleanup_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1672
ffmpeg.h
AV_LOG_VERBOSE
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:196
fc
#define fc(width, name, range_min, range_max)
Definition: cbs_av1.c:464
FilterGraph::nb_inputs
int nb_inputs
Definition: ffmpeg.h:291
VIDEO_CHANGED
@ VIDEO_CHANGED
Definition: ffmpeg_filter.c:2584
AV_FRAME_DATA_DISPLAYMATRIX
@ AV_FRAME_DATA_DISPLAYMATRIX
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:85
filter
filter_frame For filters that do not use the this method is called when a frame is pushed to the filter s input It can be called at any time except in a reentrant way If the input frame is enough to produce then the filter should push the output frames on the output link immediately As an exception to the previous rule if the input frame is enough to produce several output frames then the filter needs output only at least one per link The additional frames can be left buffered in the filter
Definition: filter_design.txt:228
AVFrame::flags
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:612
ofp_from_ofilter
static OutputFilterPriv * ofp_from_ofilter(OutputFilter *ofilter)
Definition: ffmpeg_filter.c:235
AVChannelLayout::order
enum AVChannelOrder order
Channel order used in this layout.
Definition: channel_layout.h:308
av_buffer_ref
AVBufferRef * av_buffer_ref(const AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:103
AV_BUFFERSRC_FLAG_KEEP_REF
@ AV_BUFFERSRC_FLAG_KEEP_REF
Keep a reference to the frame.
Definition: buffersrc.h:53
IFILTER_FLAG_AUTOROTATE
@ IFILTER_FLAG_AUTOROTATE
Definition: ffmpeg.h:239
AVChannelLayout::nb_channels
int nb_channels
Number of channels in this layout.
Definition: channel_layout.h:313
AVFrame::buf
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:553
avio_size
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:322
av_strlcatf
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:103
ost
static AVStream * ost
Definition: vaapi_transcode.c:42
sample_rate
sample_rate
Definition: ffmpeg_filter.c:425
fg_output_step
static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2319
FilterGraphPriv
Definition: ffmpeg_filter.c:45
av_channel_layout_describe_bprint
int av_channel_layout_describe_bprint(const AVChannelLayout *channel_layout, AVBPrint *bp)
bprint variant of av_channel_layout_describe().
Definition: channel_layout.c:590
AVFrame::data
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:361
av_malloc
#define av_malloc(s)
Definition: tableprint_vlc.h:30
FilterGraphThread::eof_in
uint8_t * eof_in
Definition: ffmpeg_filter.c:99
avfilter_graph_free
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
Definition: avfiltergraph.c:116
configure_filtergraph
static int configure_filtergraph(FilterGraph *fg, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:1707
choose_pix_fmts
static int choose_pix_fmts(OutputFilter *ofilter, AVBPrint *bprint, const char **dst)
Definition: ffmpeg_filter.c:370
OutputFile::nb_streams
int nb_streams
Definition: ffmpeg.h:583
AUTO_INSERT_FILTER
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg)
InputStream
Definition: ffmpeg.h:345
filter_nbthreads
char * filter_nbthreads
Definition: ffmpeg_opt.c:82
debug_ts
int debug_ts
Definition: ffmpeg_opt.c:76
fg_finalise_bindings
int fg_finalise_bindings(FilterGraph *fg)
Definition: ffmpeg_filter.c:1221
InputFilterOptions::trim_start_us
int64_t trim_start_us
Definition: ffmpeg.h:245
InputFilterOptions::flags
unsigned flags
Definition: ffmpeg.h:261
avfilter_graph_create_filter
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
Definition: avfiltergraph.c:137
avfilter_graph_alloc_filter
AVFilterContext * avfilter_graph_alloc_filter(AVFilterGraph *graph, const AVFilter *filter, const char *name)
Create a new filter instance in a filter graph.
Definition: avfiltergraph.c:164
finish
static void finish(void)
Definition: movenc.c:342
AV_OPT_TYPE_BINARY
@ AV_OPT_TYPE_BINARY
offset must point to a pointer immediately followed by an int for the length
Definition: opt.h:241
av_color_space_name
const char * av_color_space_name(enum AVColorSpace space)
Definition: pixdesc.c:3338
FRAME_OPAQUE_SUB_HEARTBEAT
@ FRAME_OPAQUE_SUB_HEARTBEAT
Definition: ffmpeg.h:88
OutputFilterPriv
Definition: ffmpeg_filter.c:200
fg_thread_uninit
static void fg_thread_uninit(FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2732
fail
#define fail()
Definition: checkasm.h:179
av_fifo_write
int av_fifo_write(AVFifo *f, const void *buf, size_t nb_elems)
Write data into a FIFO.
Definition: fifo.c:188
sub2video_push_ref
static void sub2video_push_ref(InputFilterPriv *ifp, int64_t pts)
Definition: ffmpeg_filter.c:313
avfilter_graph_alloc
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:82
AV_PIX_FMT_FLAG_HWACCEL
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:128
FFSIGN
#define FFSIGN(a)
Definition: common.h:73
samplefmt.h
AVERROR_OPTION_NOT_FOUND
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:63
avfilter_graph_segment_free
void avfilter_graph_segment_free(AVFilterGraphSegment **seg)
Free the provided AVFilterGraphSegment and everything associated with it.
Definition: graphparser.c:276
sub2video_get_blank_frame
static int sub2video_get_blank_frame(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:260
AV_BPRINT_SIZE_AUTOMATIC
#define AV_BPRINT_SIZE_AUTOMATIC
ifilter_has_all_input_formats
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg_filter.c:619
val
static double val(void *priv, double ch)
Definition: aeval.c:78
OutputFilterPriv::index
int index
Definition: ffmpeg_filter.c:203
AVFrame::ch_layout
AVChannelLayout ch_layout
Channel layout of the audio data.
Definition: frame.h:741
SCH_ENC
#define SCH_ENC(encoder)
Definition: ffmpeg_sched.h:116
configure_input_video_filter
static int configure_input_video_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1521
type
it s the only field you need to keep assuming you have a context There is some magic you don t need to care about around this just let it vf type
Definition: writing_filters.txt:86
avfilter_graph_segment_parse
int avfilter_graph_segment_parse(AVFilterGraph *graph, const char *graph_str, int flags, AVFilterGraphSegment **seg)
Parse a textual filtergraph description into an intermediate form.
Definition: graphparser.c:460
pts
static int64_t pts
Definition: transcode_aac.c:643
av_opt_set
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:738
graph_is_meta
static int graph_is_meta(AVFilterGraph *graph)
Definition: ffmpeg_filter.c:1688
FilterGraphThread::frame
AVFrame * frame
Definition: ffmpeg_filter.c:86
AVFILTER_FLAG_DYNAMIC_INPUTS
#define AVFILTER_FLAG_DYNAMIC_INPUTS
The number of the filter inputs is not determined just by AVFilter.inputs.
Definition: avfilter.h:106
FrameData::tb
AVRational tb
Definition: ffmpeg.h:603
fgp_from_fg
static FilterGraphPriv * fgp_from_fg(FilterGraph *fg)
Definition: ffmpeg_filter.c:72
OutputFilterPriv::sample_rate
int sample_rate
Definition: ffmpeg_filter.c:210
av_reduce
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
FPSConvContext::dropped_keyframe
int dropped_keyframe
Definition: ffmpeg_filter.c:192
AVRational::num
int num
Numerator.
Definition: rational.h:59
LATENCY_PROBE_FILTER_PRE
@ LATENCY_PROBE_FILTER_PRE
Definition: ffmpeg.h:102
InputFilterOptions::trim_end_us
int64_t trim_end_us
Definition: ffmpeg.h:246
AVFilterPad
A filter pad used for either input or output.
Definition: internal.h:33
sch_add_filtergraph
int sch_add_filtergraph(Scheduler *sch, unsigned nb_inputs, unsigned nb_outputs, SchThreadFunc func, void *ctx)
Add a filtergraph to the scheduler.
Definition: ffmpeg_sched.c:848
av_frame_alloc
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:76
sub2video_heartbeat
static void sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2459
check_stream_specifier
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the given stream matches a stream specifier.
Definition: cmdutils.c:982
OutputFile::shortest
int shortest
Definition: ffmpeg.h:588
avfilter_inout_free
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:76
avassert.h
FrameData::frame_rate_filter
AVRational frame_rate_filter
Definition: ffmpeg.h:606
AV_LOG_ERROR
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:180
send_eof
static int send_eof(FilterGraphThread *fgt, InputFilter *ifilter, int64_t pts, AVRational tb)
Definition: ffmpeg_filter.c:2528
FF_ARRAY_ELEMS
#define FF_ARRAY_ELEMS(a)
Definition: sinewin_tablegen.c:29
InputFilterPriv
Definition: ffmpeg_filter.c:103
av_fifo_read
int av_fifo_read(AVFifo *f, void *buf, size_t nb_elems)
Read data from a FIFO.
Definition: fifo.c:240
fg_complex_bind_input
static int fg_complex_bind_input(FilterGraph *fg, InputFilter *ifilter)
Definition: ffmpeg_filter.c:1137
AV_FRAME_FLAG_KEY
#define AV_FRAME_FLAG_KEY
A flag to mark frames that are keyframes.
Definition: frame.h:591
duration
int64_t duration
Definition: movenc.c:64
av_buffersink_get_frame_rate
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
ifilter_alloc
static InputFilter * ifilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:880
AVFilterChain::filters
AVFilterParams ** filters
Definition: avfilter.h:1143
av_dict_get
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:62
AV_PIX_FMT_YUVJ422P
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:86
filter_command_free
static void filter_command_free(void *opaque, uint8_t *data)
Definition: ffmpeg_filter.c:249
VSYNC_VSCFR
@ VSYNC_VSCFR
Definition: ffmpeg.h:70
llrintf
#define llrintf(x)
Definition: libm.h:399
s
#define s(width, name)
Definition: cbs_vp9.c:198
FilterGraphPriv::frame_enc
AVFrame * frame_enc
Definition: ffmpeg_filter.c:66
InputFilterPriv::frame
AVFrame * frame
Definition: ffmpeg_filter.c:113
FilterGraph::outputs
OutputFilter ** outputs
Definition: ffmpeg.h:292
format
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFrame structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample format(the sample packing is implied by the sample format) and sample rate. The lists are not just lists
ofilter_alloc
static OutputFilter * ofilter_alloc(FilterGraph *fg)
Definition: ffmpeg_filter.c:643
AVDictionaryEntry::key
char * key
Definition: dict.h:90
AVMEDIA_TYPE_AUDIO
@ AVMEDIA_TYPE_AUDIO
Definition: avutil.h:202
configure_output_video_filter
static int configure_output_video_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1320
AV_CHANNEL_ORDER_UNSPEC
@ AV_CHANNEL_ORDER_UNSPEC
Only the channel count is specified, without any further information about the channel order.
Definition: channel_layout.h:112
av_q2d
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
OutputFilter::linklabel
uint8_t * linklabel
Definition: ffmpeg.h:278
InputFilter
Definition: ffmpeg.h:266
FilterGraphPriv::nb_outputs_done
unsigned nb_outputs_done
Definition: ffmpeg_filter.c:59
InputFilterPriv::fallback
struct InputFilterPriv::@6 fallback
av_buffersink_get_format
int av_buffersink_get_format(const AVFilterContext *ctx)
av_buffersink_get_time_base
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
av_assert0
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:40
pix_fmts
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:304
OutputFilter::ost
struct OutputStream * ost
Definition: ffmpeg.h:272
ist_filter_add
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, InputFilterOptions *opts)
Definition: ffmpeg_demux.c:981
AV_LOG_DEBUG
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:201
FrameData::dec
struct FrameData::@4 dec
ctx
AVFormatContext * ctx
Definition: movenc.c:48
nb_streams
static int nb_streams
Definition: ffprobe.c:383
av_rescale_q
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
AVSubtitle::pts
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2220
fg_thread_init
static int fg_thread_init(FilterGraphThread *fgt, const FilterGraph *fg)
Definition: ffmpeg_filter.c:2750
InputFilterOptions::name
uint8_t * name
Definition: ffmpeg.h:248
graph_opts_apply
static int graph_opts_apply(AVFilterGraphSegment *seg)
Definition: ffmpeg_filter.c:551
InputFilter::graph
struct FilterGraph * graph
Definition: ffmpeg.h:267
av_get_sample_fmt_name
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:51
key
const char * key
Definition: hwcontext_opencl.c:189
fsize
static int64_t fsize(FILE *f)
Definition: audiomatch.c:29
OutputFilterPriv::fps
FPSConvContext fps
Definition: ffmpeg_filter.c:232
fg_item_name
static const char * fg_item_name(void *obj)
Definition: ffmpeg_filter.c:959
command
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:1185
AV_PIX_FMT_YUVJ444P
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:87
frame
static AVFrame * frame
Definition: demux_decode.c:54
arg
const char * arg
Definition: jacosubdec.c:67
OutputFilterPriv::ch_layouts
const AVChannelLayout * ch_layouts
Definition: ffmpeg_filter.c:225
OutputFilterPriv::width
int width
Definition: ffmpeg_filter.c:209
av_color_range_name
const char * av_color_range_name(enum AVColorRange range)
Definition: pixdesc.c:3278
AVFormatContext
Format I/O context.
Definition: avformat.h:1255
avfilter_get_by_name
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:629
AVStream::codecpar
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:766
LIBAVUTIL_VERSION_INT
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
AVClass
Describe the class of an AVClass context structure.
Definition: log.h:66
OutputFilter::name
uint8_t * name
Definition: ffmpeg.h:274
fabs
static __device__ float fabs(float a)
Definition: cuda_runtime.h:182
avfilter_graph_config
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
Definition: avfiltergraph.c:1319
OutputFilterPriv::enc_timebase
AVRational enc_timebase
Definition: ffmpeg_filter.c:228
avfilter_graph_segment_apply
int avfilter_graph_segment_apply(AVFilterGraphSegment *seg, int flags, AVFilterInOut **inputs, AVFilterInOut **outputs)
Apply all filter/link descriptions from a graph segment to the associated filtergraph.
Definition: graphparser.c:881
InputFilterPriv::color_space
enum AVColorSpace color_space
Definition: ffmpeg_filter.c:133
NULL
#define NULL
Definition: coverity.c:32
av_opt_set_bin
int av_opt_set_bin(void *obj, const char *name, const uint8_t *val, int len, int search_flags)
Definition: opt.c:804
OutputFilterPriv::ch_layout
AVChannelLayout ch_layout
Definition: ffmpeg_filter.c:211
AVFilterParams
Parameters describing a filter to be created in a filtergraph.
Definition: avfilter.h:1075
FPSConvContext::dup_warning
uint64_t dup_warning
Definition: ffmpeg_filter.c:189
av_buffer_unref
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it.
Definition: buffer.c:139
InputStream::st
AVStream * st
Definition: ffmpeg.h:353
dec_filter_add
int dec_filter_add(Decoder *dec, InputFilter *ifilter, InputFilterOptions *opts)
Definition: ffmpeg_dec.c:1337
avfilter_graph_set_auto_convert
void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags)
Enable or disable automatic format conversion inside the graph.
Definition: avfiltergraph.c:159
InputFilterPriv::displaymatrix_present
int displaymatrix_present
Definition: ffmpeg_filter.c:145
Decoder
Definition: ffmpeg.h:331
AVFilterParams::filter
AVFilterContext * filter
The filter context.
Definition: avfilter.h:1086
AVRational
Rational number (pair of numerator and denominator).
Definition: rational.h:58
AVFilterChain::nb_filters
size_t nb_filters
Definition: avfilter.h:1144
AV_PIX_FMT_YUVJ420P
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:85
AVFilterGraph::filters
AVFilterContext ** filters
Definition: avfilter.h:815
InputFilterPriv::linklabel
uint8_t * linklabel
Definition: ffmpeg_filter.c:117
filter_opt_apply
static int filter_opt_apply(AVFilterContext *f, const char *key, const char *val)
Definition: ffmpeg_filter.c:496
OutputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:220
close_output
static int close_output(OutputFilterPriv *ofp, FilterGraphThread *fgt)
Definition: ffmpeg_filter.c:2186
FilterGraphThread::frame_queue_out
AVFifo * frame_queue_out
Definition: ffmpeg_filter.c:91
ifilter_bind_ist
static int ifilter_bind_ist(InputFilter *ifilter, InputStream *ist)
Definition: ffmpeg_filter.c:660
mathops.h
configure_output_audio_filter
static int configure_output_audio_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1400
AV_ROUND_NEAR_INF
@ AV_ROUND_NEAR_INF
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:135
FilterGraphPriv::sch_idx
unsigned sch_idx
Definition: ffmpeg_filter.c:69
FrameData::wallclock
int64_t wallclock[LATENCY_PROBE_NB]
Definition: ffmpeg.h:610
avfilter_graph_request_oldest
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
Definition: avfiltergraph.c:1449
time.h
AVFilterGraphSegment::chains
AVFilterChain ** chains
A list of filter chain contained in this segment.
Definition: avfilter.h:1167
AVFilterGraph
Definition: avfilter.h:813
inputs
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several inputs
Definition: filter_design.txt:243
InputFilterOptions
Definition: ffmpeg.h:244
InputFilterPriv::sample_aspect_ratio
AVRational sample_aspect_ratio
Definition: ffmpeg_filter.c:132
AVCOL_RANGE_UNSPECIFIED
@ AVCOL_RANGE_UNSPECIFIED
Definition: pixfmt.h:649
FilterGraph::nb_outputs
int nb_outputs
Definition: ffmpeg.h:293
c
Undefined Behavior In the C some operations are like signed integer dereferencing freed accessing outside allocated Undefined Behavior must not occur in a C it is not safe even if the output of undefined operations is unused The unsafety may seem nit picking but Optimizing compilers have in fact optimized code on the assumption that no undefined Behavior occurs Optimizing code based on wrong assumptions can and has in some cases lead to effects beyond the output of computations The signed integer overflow problem in speed critical code Code which is highly optimized and works with signed integers sometimes has the problem that often the output of the computation does not c
Definition: undefined.txt:32
OutputFilterPriv::formats
const int * formats
Definition: ffmpeg_filter.c:224
av_opt_set_int
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:789
InputStream::par
AVCodecParameters * par
Codec parameters - to be used by the decoding/streamcopy code.
Definition: ffmpeg.h:361
av_buffer_create
AVBufferRef * av_buffer_create(uint8_t *data, size_t size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:55
input_files
InputFile ** input_files
Definition: ffmpeg.c:125
OutputFile::streams
OutputStream ** streams
Definition: ffmpeg.h:582
AV_CLASS_CATEGORY_FILTER
@ AV_CLASS_CATEGORY_FILTER
Definition: log.h:36
Scheduler
Definition: ffmpeg_sched.c:263
FilterGraphPriv::fg
FilterGraph fg
Definition: ffmpeg_filter.c:46
OutputFilterPriv::ofilter
OutputFilter ofilter
Definition: ffmpeg_filter.c:201
FilterGraph
Definition: ffmpeg.h:286
AVFilterGraphSegment
A parsed representation of a filtergraph segment.
Definition: avfilter.h:1156
file_read
char * file_read(const char *filename)
Definition: cmdutils.c:1132
ENC_TIME_BASE_DEMUX
@ ENC_TIME_BASE_DEMUX
Definition: ffmpeg.h:77
InputFilterOptions::sub2video_width
int sub2video_width
Definition: ffmpeg.h:257
AVFilterInOut::pad_idx
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:964
av_buffersrc_close
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:278
AVFilterGraph::scale_sws_opts
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:818
filtergraph_is_simple
int filtergraph_is_simple(const FilterGraph *fg)
Definition: ffmpeg_filter.c:1889
av_opt_find
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1950
IFILTER_FLAG_REINIT
@ IFILTER_FLAG_REINIT
Definition: ffmpeg.h:240
f
f
Definition: af_crystalizer.c:121
AVIOContext
Bytestream IO Context.
Definition: avio.h:160
configure_output_filter
static int configure_output_filter(FilterGraph *fg, AVFilterGraph *graph, OutputFilter *ofilter, AVFilterInOut *out)
Definition: ffmpeg_filter.c:1495
av_ts2timestr
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:77
filter_thread
static int filter_thread(void *arg)
Definition: ffmpeg_filter.c:2777
AVMediaType
AVMediaType
Definition: avutil.h:199
InputFilterPriv::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Definition: ffmpeg_filter.c:143
AVFifo
Definition: fifo.c:35
FRAME_OPAQUE_SEND_COMMAND
@ FRAME_OPAQUE_SEND_COMMAND
Definition: ffmpeg.h:90
FilterGraphThread
Definition: ffmpeg_filter.c:83
av_frame_ref
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:312
av_bprint_finalize
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:240
InputFilterPriv::displaymatrix
int32_t displaymatrix[9]
Definition: ffmpeg_filter.c:146
AVChannelLayout
An AVChannelLayout holds information about the channel layout of audio data.
Definition: channel_layout.h:303
FilterGraphThread::graph
AVFilterGraph * graph
Definition: ffmpeg_filter.c:84
av_buffersrc_parameters_alloc
AVBufferSrcParameters * av_buffersrc_parameters_alloc(void)
Allocate a new AVBufferSrcParameters instance.
Definition: buffersrc.c:105
AVFilterInOut::filter_ctx
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:961
av_err2str
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:121
AVFrame::sample_rate
int sample_rate
Sample rate of the audio data.
Definition: frame.h:539
OutputFilterPriv::tb_out_locked
int tb_out_locked
Definition: ffmpeg_filter.c:218
for
for(k=2;k<=8;++k)
Definition: h264pred_template.c:425
avfilter_link
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:148
AVBufferSrcParameters::hw_frames_ctx
AVBufferRef * hw_frames_ctx
Video with a hwaccel pixel format only.
Definition: buffersrc.h:106
start_time
static int64_t start_time
Definition: ffplay.c:329
AVFILTER_FLAG_HWDEVICE
#define AVFILTER_FLAG_HWDEVICE
The filter can create hardware frames using AVFilterContext.hw_device_ctx.
Definition: avfilter.h:138
adjust_frame_pts_to_encoder_tb
static double adjust_frame_pts_to_encoder_tb(AVFrame *frame, AVRational tb_dst, int64_t start_time)
Definition: ffmpeg_filter.c:2021
InputFilterPriv::color_range
enum AVColorRange color_range
Definition: ffmpeg_filter.c:134
AV_SAMPLE_FMT_NONE
@ AV_SAMPLE_FMT_NONE
Definition: samplefmt.h:56
MATRIX_CHANGED
@ MATRIX_CHANGED
Definition: ffmpeg_filter.c:2586
FilterCommand::time
double time
Definition: ffmpeg_filter.c:245
insert_trim
static int insert_trim(int64_t start_time, int64_t duration, AVFilterContext **last_filter, int *pad_idx, const char *filter_name)
Definition: ffmpeg_filter.c:1247
InputFilterPriv::initialize
unsigned int initialize
Definition: ffmpeg_filter.c:171
graph_parse
static int graph_parse(AVFilterGraph *graph, const char *desc, AVFilterInOut **inputs, AVFilterInOut **outputs, AVBufferRef *hw_device)
Definition: ffmpeg_filter.c:575
avfilter_graph_queue_command
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
Definition: avfiltergraph.c:1367
AV_NOPTS_VALUE
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
AVFrame::time_base
AVRational time_base
Time base for the timestamps in this frame.
Definition: frame.h:467
AVFrameSideData::data
uint8_t * data
Definition: frame.h:248
read_binary
static int read_binary(const char *path, uint8_t **data, int *len)
Definition: ffmpeg_filter.c:448
AVFrame::format
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames,...
Definition: frame.h:427
FilterGraphPriv::disable_conversions
int disable_conversions
Definition: ffmpeg_filter.c:57
frame_data
FrameData * frame_data(AVFrame *frame)
Get our axiliary frame data attached to the frame, allocating it if needed.
Definition: ffmpeg.c:473
AVSubtitle::end_display_time
uint32_t end_display_time
Definition: avcodec.h:2217
FilterGraphThread::eof_out
uint8_t * eof_out
Definition: ffmpeg_filter.c:100
FilterGraphPriv::graph_desc
const char * graph_desc
Definition: ffmpeg_filter.c:61
allocate_array_elem
void * allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
Atomically add a new element to an array of pointers, i.e.
Definition: cmdutils.c:1104
InputFilterPriv::width
int width
Definition: ffmpeg_filter.c:131
AV_PIX_FMT_RGB32
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:451
filter_is_buffersrc
static int filter_is_buffersrc(const AVFilterContext *f)
Definition: ffmpeg_filter.c:1681
AUDIO_CHANGED
@ AUDIO_CHANGED
Definition: ffmpeg_filter.c:2585
SCH_DEC
#define SCH_DEC(decoder)
Definition: ffmpeg_sched.h:113
sch_filter_receive
int sch_filter_receive(Scheduler *sch, unsigned fg_idx, unsigned *in_idx, AVFrame *frame)
Called by filtergraph tasks to obtain frames for filtering.
Definition: ffmpeg_sched.c:2360
av_dict_free
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values.
Definition: dict.c:223
unknown_if_null
static const char * unknown_if_null(const char *str)
Definition: ffmpeg_filter.c:2590
InputFilterOptions::sub2video_height
int sub2video_height
Definition: ffmpeg.h:258
FF_COMPLIANCE_UNOFFICIAL
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: defs.h:61
decoders
Decoder ** decoders
Definition: ffmpeg.c:134
nb_decoders
int nb_decoders
Definition: ffmpeg.c:135
OutputFilter::type
enum AVMediaType type
Definition: ffmpeg.h:280
read_frames
static int read_frames(FilterGraph *fg, FilterGraphThread *fgt, AVFrame *frame)
Definition: ffmpeg_filter.c:2397
av_channel_layout_compare
int av_channel_layout_compare(const AVChannelLayout *chl, const AVChannelLayout *chl1)
Check whether two channel layouts are semantically the same, i.e.
Definition: channel_layout.c:800
SUBTITLE_BITMAP
@ SUBTITLE_BITMAP
A bitmap, pict will be set.
Definition: avcodec.h:2170
AV_LOG_INFO
#define AV_LOG_INFO
Standard information.
Definition: log.h:191
sample_rates
sample_rates
Definition: ffmpeg_filter.c:425
send_frame
static int send_frame(FilterGraph *fg, FilterGraphThread *fgt, InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg_filter.c:2595
avfilter_init_str
int avfilter_init_str(AVFilterContext *filter, const char *args)
Initialize a filter with the supplied parameters.
Definition: avfilter.c:944
buffersink.h
av_channel_layout_default
void av_channel_layout_default(AVChannelLayout *ch_layout, int nb_channels)
Get the default channel layout for a given number of channels.
Definition: channel_layout.c:830
av_find_nearest_q_idx
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
av_buffersink_get_w
int av_buffersink_get_w(const AVFilterContext *ctx)
FilterCommand::all_filters
int all_filters
Definition: ffmpeg_filter.c:246
AVFrame::nb_samples
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:420
FPSConvContext::framerate_clip
int framerate_clip
Definition: ffmpeg_filter.c:197
bprint.h
FPSConvContext::frame_number
int64_t frame_number
Definition: ffmpeg_filter.c:183
av_buffersrc_parameters_set
int av_buffersrc_parameters_set(AVFilterContext *ctx, AVBufferSrcParameters *param)
Initialize the buffersrc or abuffersrc filter with the provided parameters.
Definition: buffersrc.c:118
FPSConvContext
Definition: ffmpeg_filter.c:180
lrintf
#define lrintf(x)
Definition: libm_mips.h:72
i
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:255
InputFilterPriv::index
int index
Definition: ffmpeg_filter.c:108
FrameData::bits_per_raw_sample
int bits_per_raw_sample
Definition: ffmpeg.h:608
fg_send_command
void fg_send_command(FilterGraph *fg, double time, const char *target, const char *command, const char *arg, int all_filters)
Definition: ffmpeg_filter.c:2894
FilterGraphPriv::is_simple
int is_simple
Definition: ffmpeg_filter.c:51
InputFilterOptions::fallback
AVFrame * fallback
Definition: ffmpeg.h:263
av_buffersrc_add_frame_flags
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:193
VSYNC_CFR
@ VSYNC_CFR
Definition: ffmpeg.h:68
src2
const pixel * src2
Definition: h264pred_template.c:422
configure_input_audio_filter
static int configure_input_audio_filter(FilterGraph *fg, AVFilterGraph *graph, InputFilter *ifilter, AVFilterInOut *in)
Definition: ffmpeg_filter.c:1621
AVColorSpace
AVColorSpace
YUV colorspace type.
Definition: pixfmt.h:609
display.h
FPSConvContext::framerate_max
AVRational framerate_max
Definition: ffmpeg_filter.c:195
av_assert1
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:56
AVSampleFormat
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:55
delta
float delta
Definition: vorbis_enc_data.h:430
FRAME_OPAQUE_EOF
@ FRAME_OPAQUE_EOF
Definition: ffmpeg.h:89
InputFile::ctx
AVFormatContext * ctx
Definition: ffmpeg.h:394
av_frame_move_ref
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:561
cfgp_from_cfg
static const FilterGraphPriv * cfgp_from_cfg(const FilterGraph *fg)
Definition: ffmpeg_filter.c:77
av_frame_unref
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:534
InputFilterPriv::eof
int eof
Definition: ffmpeg_filter.c:125
tb
#define tb
Definition: regdef.h:68
av_mallocz
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:254
av_inv_q
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
av_buffer_replace
int av_buffer_replace(AVBufferRef **pdst, const AVBufferRef *src)
Ensure dst refers to the same data as src.
Definition: buffer.c:233
len
int len
Definition: vorbis_enc_data.h:426
AVCOL_SPC_UNSPECIFIED
@ AVCOL_SPC_UNSPECIFIED
Definition: pixfmt.h:612
ofilter_bind_ost
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost, unsigned sched_idx_enc)
Definition: ffmpeg_filter.c:781
filtergraphs
FilterGraph ** filtergraphs
Definition: ffmpeg.c:131
int_cb
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:327
av_calloc
void * av_calloc(size_t nmemb, size_t size)
Definition: mem.c:262
outputs
static const AVFilterPad outputs[]
Definition: af_aap.c:310
sch_connect
int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst)
Definition: ffmpeg_sched.c:946
FFMPEG_OPT_VSYNC_DROP
#define FFMPEG_OPT_VSYNC_DROP
Definition: ffmpeg.h:59
av_buffersink_get_h
int av_buffersink_get_h(const AVFilterContext *ctx)
sch_filter_command
int sch_filter_command(Scheduler *sch, unsigned fg_idx, AVFrame *frame)
Definition: ffmpeg_sched.c:2462
AVFilter
Filter definition.
Definition: avfilter.h:166
video_sync_process
static void video_sync_process(OutputFilterPriv *ofp, AVFrame *frame, int64_t *nb_frames, int64_t *nb_frames_prev)
Definition: ffmpeg_filter.c:2063
ifp_from_ifilter
static InputFilterPriv * ifp_from_ifilter(InputFilter *ifilter)
Definition: ffmpeg_filter.c:175
fg_create
int fg_create(FilterGraph **pfg, char *graph_desc, Scheduler *sch)
Create a new filtergraph in the global filtergraph list.
Definition: ffmpeg_filter.c:973
mid_pred
#define mid_pred
Definition: mathops.h:98
AV_BUFFERSINK_FLAG_NO_REQUEST
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:97
ret
ret
Definition: filter_design.txt:187
AVStream
Stream structure.
Definition: avformat.h:743
AV_LOG_FATAL
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:174
pixfmt.h
AVClass::class_name
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
InputFilter::name
uint8_t * name
Definition: ffmpeg.h:268
VSYNC_DROP
@ VSYNC_DROP
Definition: ffmpeg.h:72
av_opt_eval_int
int av_opt_eval_int(void *obj, const AVOption *o, const char *val, int *int_out)
av_channel_layout_check
int av_channel_layout_check(const AVChannelLayout *channel_layout)
Check whether a channel layout is valid, i.e.
Definition: channel_layout.c:774
FPSConvContext::last_frame
AVFrame * last_frame
Definition: ffmpeg_filter.c:181
InputFile::streams
InputStream ** streams
Definition: ffmpeg.h:408
insert_filter
static int insert_filter(AVFilterContext **last_filter, int *pad_idx, const char *filter_name, const char *args)
Definition: ffmpeg_filter.c:1298
AVFilterParams::opts
AVDictionary * opts
Options to be apllied to the filter.
Definition: avfilter.h:1127
OutputFilterPriv::next_pts
int64_t next_pts
Definition: ffmpeg_filter.c:231
choose_channel_layouts
static void choose_channel_layouts(OutputFilterPriv *ofp, AVBPrint *bprint)
Definition: ffmpeg_filter.c:428
av_bprintf
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:99
ReinitReason
ReinitReason
Definition: ffmpeg_filter.c:2583
av_fifo_alloc2
AVFifo * av_fifo_alloc2(size_t nb_elems, size_t elem_size, unsigned int flags)
Allocate and initialize an AVFifo with a given element size.
Definition: fifo.c:47
AVOption::type
enum AVOptionType type
Definition: opt.h:362
AVFrame::sample_aspect_ratio
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:447
avfilter_pad_get_type
enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
Get the type of an AVFilterPad.
Definition: avfilter.c:977
AVFrame::hw_frames_ctx
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame.
Definition: frame.h:691
FrameOpaque
FrameOpaque
Definition: ffmpeg.h:87
av_get_media_type_string
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:28
AVFrame::height
int height
Definition: frame.h:412
AVStream::index
int index
stream index in AVFormatContext
Definition: avformat.h:749
DEF_CHOOSE_FORMAT
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name)
Definition: ffmpeg_filter.c:399
channel_layout.h
AVBufferSrcParameters
This structure contains the parameters describing the frames that will be passed to this filter.
Definition: buffersrc.h:73
av_buffersink_get_sample_rate
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
describe_filter_link
static char * describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
Definition: ffmpeg_filter.c:631
buffer
the frame and frame reference mechanism is intended to as much as expensive copies of that data while still allowing the filters to produce correct results The data is stored in buffers represented by AVFrame structures Several references can point to the same frame buffer
Definition: filter_design.txt:49
InputFilterPriv::bound
int bound
Definition: ffmpeg_filter.c:126
AVRational::den
int den
Denominator.
Definition: rational.h:60
InputStream::file
struct InputFile * file
Definition: ffmpeg.h:349
AVFilterChain
A filterchain is a list of filter specifications.
Definition: avfilter.h:1142
InputFilterPriv::frame_queue
AVFifo * frame_queue
Definition: ffmpeg_filter.c:141
AV_PIX_FMT_NONE
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:72
atomic_fetch_add
#define atomic_fetch_add(object, operand)
Definition: stdatomic.h:131
avfilter.h
InputFilterPriv::type_src
enum AVMediaType type_src
Definition: ffmpeg_filter.c:123
av_channel_layout_uninit
void av_channel_layout_uninit(AVChannelLayout *channel_layout)
Free any allocated data in the channel layout and reset the channel count to 0.
Definition: channel_layout.c:432
FilterGraphPriv::is_meta
int is_meta
Definition: ffmpeg_filter.c:54
IFILTER_FLAG_CFR
@ IFILTER_FLAG_CFR
Definition: ffmpeg.h:241
AVFILTER_FLAG_METADATA_ONLY
#define AVFILTER_FLAG_METADATA_ONLY
The filter is a "metadata" filter - it does not modify the frame data in any way.
Definition: avfilter.h:133
avio_read
int avio_read(AVIOContext *s, unsigned char *buf, int size)
Read size bytes from AVIOContext into buf.
Definition: aviobuf.c:611
choose_out_timebase
static int choose_out_timebase(OutputFilterPriv *ofp, AVFrame *frame)
Definition: ffmpeg_filter.c:1944
OutputFilterPriv::sample_rates
const int * sample_rates
Definition: ffmpeg_filter.c:226
AVERROR_FILTER_NOT_FOUND
#define AVERROR_FILTER_NOT_FOUND
Filter not found.
Definition: error.h:60
sub2video_copy_rect
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg_filter.c:282
AVFilterGraphSegment::nb_chains
size_t nb_chains
Definition: avfilter.h:1168
AVFilterContext
An instance of a filter.
Definition: avfilter.h:407
FilterGraph::class
const AVClass * class
Definition: ffmpeg.h:287
av_channel_layout_copy
int av_channel_layout_copy(AVChannelLayout *dst, const AVChannelLayout *src)
Make a copy of a channel layout.
Definition: channel_layout.c:439
OutputFilter
Definition: ffmpeg.h:271
sub2video_frame
static int sub2video_frame(InputFilter *ifilter, AVFrame *frame, int buffer)
Definition: ffmpeg_filter.c:2481
InputFilterPriv::ifilter
InputFilter ifilter
Definition: ffmpeg_filter.c:104
AVIO_FLAG_READ
#define AVIO_FLAG_READ
read-only
Definition: avio.h:617
av_strdup
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:270
desc
const char * desc
Definition: libsvtav1.c:73
AVMEDIA_TYPE_VIDEO
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
av_buffersrc_get_nb_failed_requests
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:313
AVFILTER_AUTO_CONVERT_NONE
@ AVFILTER_AUTO_CONVERT_NONE
all automatic conversions disabled
Definition: avfilter.h:929
AVBufferRef
A reference to a data buffer.
Definition: buffer.h:82
avio_open2
int avio_open2(AVIOContext **s, const char *filename, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: avio.c:490
OutputFilter::nb_frames_drop
atomic_uint_least64_t nb_frames_drop
Definition: ffmpeg.h:283
InputFilterPriv::sub2video
struct InputFilterPriv::@7 sub2video
auto_conversion_filters
int auto_conversion_filters
Definition: ffmpeg_opt.c:85
llrint
#define llrint(x)
Definition: libm.h:394
AVFrameSideData
Structure to hold side data for an AVFrame.
Definition: frame.h:246
OutputStream::is_cfr
int is_cfr
Definition: ffmpeg.h:527
AVPixFmtDescriptor
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
OutputStream::autoscale
int autoscale
Definition: ffmpeg.h:532
InputStream::index
int index
Definition: ffmpeg.h:351
sch_filter_receive_finish
void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
Called by filter tasks to signal that a filter input will no longer accept input.
Definition: ffmpeg_sched.c:2404
av_free
#define av_free(p)
Definition: tableprint_vlc.h:33
AVDictionaryEntry
Definition: dict.h:89
ENC_TIME_BASE_FILTER
@ ENC_TIME_BASE_FILTER
Definition: ffmpeg.h:78
FilterCommand::target
char * target
Definition: ffmpeg_filter.c:241
AV_ROUND_PASS_MINMAX
@ AV_ROUND_PASS_MINMAX
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:159
fg_class
static const AVClass fg_class
Definition: ffmpeg_filter.c:966
av_freep
#define av_freep(p)
Definition: tableprint_vlc.h:34
src
INIT_CLIP pixel * src
Definition: h264pred_template.c:418
av_dict_get_string
int av_dict_get_string(const AVDictionary *m, char **buffer, const char key_val_sep, const char pairs_sep)
Get dictionary entries as a string.
Definition: dict.c:250
Decoder::type
enum AVMediaType type
Definition: ffmpeg.h:334
InputFilterPriv::format
int format
Definition: ffmpeg_filter.c:129
InputFilterPriv::end_pts
int64_t end_pts
marks if sub2video_update should force an initialization
Definition: ffmpeg_filter.c:168
nb_filtergraphs
int nb_filtergraphs
Definition: ffmpeg.c:132
d
d
Definition: ffmpeg_filter.c:425
int32_t
int32_t
Definition: audioconvert.c:56
sub2video_update
static void sub2video_update(InputFilterPriv *ifp, int64_t heartbeat_pts, const AVSubtitle *sub)
Definition: ffmpeg_filter.c:328
imgutils.h
timestamp.h
OutputStream
Definition: mux.c:53
AVERROR_BUG
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:52
avio_close
int avio_close(AVIOContext *s)
Close the resource accessed by the AVIOContext s and free it.
Definition: avio.c:615
OutputStream::st
AVStream * st
Definition: mux.c:54
AVFrame::linesize
int linesize[AV_NUM_DATA_POINTERS]
For video, a positive or negative value, which is typically indicating the size in bytes of each pict...
Definition: frame.h:385
OutputFilterPriv::format
int format
Definition: ffmpeg_filter.c:208
avfilter_graph_send_command
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
Definition: avfiltergraph.c:1337
av_log
#define av_log(a,...)
Definition: tableprint_vlc.h:27
InputFilterPriv::opts
InputFilterOptions opts
Definition: ffmpeg_filter.c:106
dts_error_threshold
float dts_error_threshold
Definition: ffmpeg_opt.c:63
av_fifo_freep2
void av_fifo_freep2(AVFifo **f)
Free an AVFifo and reset pointer to NULL.
Definition: fifo.c:286
AVERROR_INVALIDDATA
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:61
av_ts2str
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
ifilter_bind_dec
static int ifilter_bind_dec(InputFilterPriv *ifp, Decoder *dec)
Definition: ffmpeg_filter.c:713
h
h
Definition: vp9dsp_template.c:2038
av_bprint_chars
void av_bprint_chars(AVBPrint *buf, char c, unsigned n)
Append char c n times to a print buffer.
Definition: bprint.c:145
hw_device_for_filter
AVBufferRef * hw_device_for_filter(void)
Get a hardware device to be used with this filtergraph.
Definition: ffmpeg_hw.c:300
AVDictionaryEntry::value
char * value
Definition: dict.h:91
AVFilterGraph::nb_filters
unsigned nb_filters
Definition: avfilter.h:816
avstring.h
AVFilterContext::filter
const AVFilter * filter
the AVFilter of which this is an instance
Definition: avfilter.h:410
AVColorRange
AVColorRange
Visual content value range.
Definition: pixfmt.h:648
OutputFile::recording_time
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:585
frame_data_c
const FrameData * frame_data_c(AVFrame *frame)
Definition: ffmpeg.c:479
OutputFilterPriv::tb_out
AVRational tb_out
Definition: ffmpeg_filter.c:215
AVFilterInOut
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:956
VSYNC_PASSTHROUGH
@ VSYNC_PASSTHROUGH
Definition: ffmpeg.h:67
OutputFilterPriv::height
int height
Definition: ffmpeg_filter.c:209
snprintf
#define snprintf
Definition: snprintf.h:34
SCH_FILTER_IN
#define SCH_FILTER_IN(filter, input)
Definition: ffmpeg_sched.h:119
FPSConvContext::framerate
AVRational framerate
Definition: ffmpeg_filter.c:194
av_log2
int av_log2(unsigned v)
Definition: intmath.c:26
buffersrc.h
fg_thread_set_name
static void fg_thread_set_name(const FilterGraph *fg)
Definition: ffmpeg_filter.c:2717
ist_find_unused
InputStream * ist_find_unused(enum AVMediaType type)
Find an unused input stream of given type.
Definition: ffmpeg_demux.c:153
sub2video_prepare
static void sub2video_prepare(InputFilterPriv *ifp)
Definition: ffmpeg_filter.c:1510
av_rescale_q_rnd
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
av_dict_iterate
const AVDictionaryEntry * av_dict_iterate(const AVDictionary *m, const AVDictionaryEntry *prev)
Iterate over a dictionary.
Definition: dict.c:44
AVSubtitle::start_display_time
uint32_t start_display_time
Definition: avcodec.h:2216
FilterCommand::command
char * command
Definition: ffmpeg_filter.c:242
FilterCommand
Definition: ffmpeg_filter.c:240
AV_FIFO_FLAG_AUTO_GROW
#define AV_FIFO_FLAG_AUTO_GROW
Automatically resize the FIFO on writes, so that the data fits.
Definition: fifo.h:67
InputFilterPriv::height
int height
Definition: ffmpeg_filter.c:131
av_get_pix_fmt_name
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2882
OutputFilter::nb_frames_dup
atomic_uint_least64_t nb_frames_dup
Definition: ffmpeg.h:282
filter_complex_nbthreads
int filter_complex_nbthreads
Definition: ffmpeg_opt.c:83
OutputFile
Definition: ffmpeg.h:574
InputFilterOptions::framerate
AVRational framerate
Definition: ffmpeg.h:255
ff_thread_setname
static int ff_thread_setname(const char *name)
Definition: thread.h:216
LATENCY_PROBE_FILTER_POST
@ LATENCY_PROBE_FILTER_POST
Definition: ffmpeg.h:103
FPSConvContext::framerate_supported
const AVRational * framerate_supported
Definition: ffmpeg_filter.c:196